hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1daee0c91aad8b50c4b72bda2d5d6e4ec98d2423 | 4,399 | py | Python | subprojects/python3_gen_engine/src/x.py | xhd2015/rsp3-armv8-baremetal | f7e2ac04abd3be20daa94ad9c7ad2c1bad5159b0 | [
"MIT"
] | 21 | 2018-03-14T09:45:26.000Z | 2021-09-13T01:13:27.000Z | subprojects/python3_gen_engine/src/x.py | xhd2015/rsp3-armv8-baremetal | f7e2ac04abd3be20daa94ad9c7ad2c1bad5159b0 | [
"MIT"
] | null | null | null | subprojects/python3_gen_engine/src/x.py | xhd2015/rsp3-armv8-baremetal | f7e2ac04abd3be20daa94ad9c7ad2c1bad5159b0 | [
"MIT"
] | 4 | 2018-03-18T11:56:09.000Z | 2021-02-04T16:26:52.000Z | # -*-encoding:utf8-*-
from support import *
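# Note (added for clarity): the star import above is expected to provide the
# register description used below (name, scale_type, fields, enums, applies,
# templateArgs, has_read/has_write, sys_reg_name, resValue, out, ...); the
# script then emits a packed C++ class with bit-field accessors for that register.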
hasMultiFields = (len(fields)>1)
ori_fields=fields
fields=processedFields(ori_fields)
qualScaleType = scale_type
qualTemplateSpecArgs=""
qualTemplateAssert=""
if templateSpecArgs is not None:
qualTemplateSpecArgs="<"+templateSpecArgs+">"
if templateArgs is not None:
out.write('''template <{templateArgs}>
'''.format(**locals()))
out.write('''
class {name} {qualTemplateSpecArgs}
'''.format(**locals()))
if extends is not None:
out.write(''' :public {extends}
'''.format(**locals()))
out.write('''{{
public:
'''.format(**locals()))
if templateAssert is not None:
cond=templateAssert[0]
err=templateAssert[1]
out.write(''' static_assert({cond},"{err}");
'''.format(**locals()))
out.write(''' using ScaleType={scale_type};
'''.format(**locals()))
for enumName,enumVal in enums:
out.write(''' enum {enumName} {{
'''.format(**locals()))
for val in enumVal:
out.write(''' {val},
'''.format(**locals()))
out.write(''' }};
'''.format(**locals()))
if hasMultiFields:
out.write(''' union {{
'''.format(**locals()))
for struct_name,field in fields:
if hasMultiFields:
out.write(''' struct {{
'''.format(**locals()))
for i in range(0,len(field),2):
fd=field[i]
bits=str(field[i+1])
out.write(''' {qualScaleType} {fd}:{bits};
'''.format(**locals()))
if hasMultiFields:
out.write(''' }}__attribute__((packed)) {struct_name};
'''.format(**locals()))
if hasMultiFields:
out.write(''' }}; //union
'''.format(**locals()))
out.write('''
'''.format(**locals()))
qualName = name
out.write(''' AS_MACRO {name}({scale_type} v) {{set(v);}}
AS_MACRO {name}& operator=({scale_type} v) {{set(v);return *this;}}
AS_MACRO void set({scale_type} v) {{ *reinterpret_cast<{scale_type}*>(this)=v;}}
AS_MACRO {scale_type} get()const
{{
return *reinterpret_cast<{scale_type}*>(this);
}}
'''.format(**locals()))
if has_read:
out.write(''' AS_MACRO void dump()const
{{
kout << "{name}: ";
'''.format(**locals()))
applyIndex=-1
for struct_name,field in fields:
applyIndex+=1
if hasMultiFields:
if applies[applyIndex]:
ifCond = "if("+applies[applyIndex]+")"
else:
ifCond = "else"
out.write(''' {ifCond}
{{
'''.format(**locals()))
out.write(''' kout
'''.format(**locals()))
for i in range(0,len(field),2):
fd=field[i]
if hasMultiFields:
fd=struct_name+"."+fd
features=ori_fields[applyIndex][1][i].split(",")
if len(features)>1:
features=features[1:]
if len(features)>1:
raise Exception("Currently Unsupported Feature Number more than 1\n")
transferedFd = features[0].strip()+"("+fd+")"
else:
transferedFd = fd
out.write(''' << "{fd} = " << {transferedFd} << ", "
'''.format(**locals()))
out.write(''' << "\\n";
'''.format(**locals()))
if hasMultiFields:
out.write(''' }}
'''.format(**locals()))
out.write(''' }}
'''.format(**locals()))
if has_read:
out.write(''' AS_MACRO static {name} read()
{{
{name} res;
__asm__ __volatile__("mrs %0,{sys_reg_name}\\n\\t":"=r"(res));
return res;
}}
AS_MACRO {name} & update()
{{
__asm__ __volatile__("mrs %0,{sys_reg_name}\\n\\t":"=r"(*this));
return *this;
}}
'''.format(**locals()))
if has_write and not hasMultiFields:
out.write(''' AS_MACRO {qualName} & setMandatoryFields()
{{
'''.format(**locals()))
for i in range(0,len(field),2):
fd=field[i]
v = resValue(ori_fields[0][1][i])
if v==1:
value=1
else:
value=0
out.write(''' {fd} = {value};
'''.format(**locals()))
out.write(''' return *this;
}}
'''.format(**locals()))
if has_write:
out.write(''' AS_MACRO void write()const
{{
__asm__ __volatile__("msr {sys_reg_name},%0\\n\\t"::"r"(*this));
}}
'''.format(**locals()))
out.write('''}}__attribute__((packed));
'''.format(**locals()))
nullptr
s3_0_c12_8_3
| 29.722973 | 89 | 0.53353 |
67fe1b83e0a2557e1de69ca90c361bebf5bac028 | 407 | py | Python | app/infrastructure/abstract_repository.py | pvsfair/architecture-patterns-python | 3959cbeae8cb9083d4a7a798bd389cf6d8cd6502 | [
"MIT"
] | null | null | null | app/infrastructure/abstract_repository.py | pvsfair/architecture-patterns-python | 3959cbeae8cb9083d4a7a798bd389cf6d8cd6502 | [
"MIT"
] | null | null | null | app/infrastructure/abstract_repository.py | pvsfair/architecture-patterns-python | 3959cbeae8cb9083d4a7a798bd389cf6d8cd6502 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List
from app.domain.models.Batch import Batch
class AbstractRepository(ABC):
@abstractmethod
def add(self, batch: Batch):
raise NotImplementedError
@abstractmethod
def get(self, reference) -> Batch:
raise NotImplementedError
@abstractmethod
def list(self) -> List[Batch]:
raise NotImplementedError
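# A minimal sketch (added for illustration, not part of the original file) of a
# concrete repository satisfying AbstractRepository; it keeps Batch objects in a
# dict keyed by a `reference` attribute, which is assumed to exist on the model.
class InMemoryRepository(AbstractRepository):
    def __init__(self):
        self._batches = {}
    def add(self, batch: Batch):
        # Store (or overwrite) the batch under its reference.
        self._batches[batch.reference] = batch
    def get(self, reference) -> Batch:
        # Raises KeyError for an unknown reference.
        return self._batches[reference]
    def list(self) -> List[Batch]:
        return list(self._batches.values())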
| 21.421053 | 41 | 0.710074 |
0981df23baad1e3539b52fe250ded26d372793bc | 31 | py | Python | dttpy/__init__.py | neouniverse/dttpy | c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7 | [
"MIT"
] | null | null | null | dttpy/__init__.py | neouniverse/dttpy | c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7 | [
"MIT"
] | null | null | null | dttpy/__init__.py | neouniverse/dttpy | c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7 | [
"MIT"
] | null | null | null | #
from .dttdata import DttData
| 10.333333 | 28 | 0.774194 |
934165304eb5ff95c3669863b15209c8b18ca3f6 | 2,702 | py | Python | webserver/python2.7/site-packages/joblib/testing.py | maxr1876/Radix | bf9a5470908ea0823c8398565086b1e6b960c73b | [
"BSD-2-Clause"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | webserver/python2.7/site-packages/joblib/testing.py | maxr1876/Radix | bf9a5470908ea0823c8398565086b1e6b960c73b | [
"BSD-2-Clause"
] | null | null | null | webserver/python2.7/site-packages/joblib/testing.py | maxr1876/Radix | bf9a5470908ea0823c8398565086b1e6b960c73b | [
"BSD-2-Clause"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | """
Helper for testing.
"""
import sys
import warnings
import os.path
import re
import subprocess
import threading
from joblib._compat import PY3_OR_LATER
def warnings_to_stdout():
""" Redirect all warnings to stdout.
"""
showwarning_orig = warnings.showwarning
def showwarning(msg, cat, fname, lno, file=None, line=0):
showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout)
warnings.showwarning = showwarning
#warnings.simplefilter('always')
try:
from nose.tools import assert_raises_regex
except ImportError:
# For Python 2.7
try:
from nose.tools import assert_raises_regexp as assert_raises_regex
except ImportError:
# for Python 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
def check_subprocess_call(cmd, timeout=1, stdout_regex=None):
"""Runs a command in a subprocess with timeout in seconds.
Also checks returncode is zero and stdout if stdout_regex is set.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def kill_process():
proc.kill()
timer = threading.Timer(timeout, kill_process)
try:
timer.start()
stdout, stderr = proc.communicate()
if PY3_OR_LATER:
stdout, stderr = stdout.decode(), stderr.decode()
if proc.returncode != 0:
message = (
'Non-zero return code: {0}.\nStdout:\n{1}\n'
'Stderr:\n{2}').format(
proc.returncode, stdout, stderr)
raise ValueError(message)
if (stdout_regex is not None and
not re.search(stdout_regex, stdout)):
raise ValueError(
"Unexpected output: '{0!r}' does not match:\n{1!r}".format(
stdout_regex, stdout))
finally:
timer.cancel()
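# A small usage sketch (assumed, not from the original module): run a short
# Python command with a generous timeout and check its output against a regex.
if __name__ == "__main__":
    check_subprocess_call(
        [sys.executable, "-c", "print('hello from subprocess')"],
        timeout=5,
        stdout_regex=r"hello from subprocess",
    )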
| 31.418605 | 78 | 0.583642 |
82a7436a9572d857d1ef96ba2dc15f66dace36cc | 1,049 | py | Python | portal/permissions.py | rajexp/quizzer | b88a7f2fd68d2e664688a006e40ae1c42c713254 | [
"Apache-2.0"
] | 1 | 2017-10-24T16:13:37.000Z | 2017-10-24T16:13:37.000Z | portal/permissions.py | rajexp/quizzer | b88a7f2fd68d2e664688a006e40ae1c42c713254 | [
"Apache-2.0"
] | null | null | null | portal/permissions.py | rajexp/quizzer | b88a7f2fd68d2e664688a006e40ae1c42c713254 | [
"Apache-2.0"
] | null | null | null | from rest_framework import permissions
from django.contrib.auth.models import User
class IsStaffOrTargetUser(permissions.BasePermission):
def has_permission(self, request, view):
# allow user to list all users if logged in user is staff
return view.action == 'retrieve' or request.user.is_authenticated
def has_object_permission(self, request, view, obj):
# allow logged in user to view own details, allows staff to view all records
return request.user.is_staff or (obj if isinstance(obj,User) else obj.user)== request.user
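# A hedged usage sketch (the viewset below is hypothetical, not part of this
# project): DRF applies a permission class by listing it in a view's
# permission_classes attribute, e.g.
#
#   from rest_framework import viewsets
#
#   class UserViewSet(viewsets.ModelViewSet):
#       queryset = User.objects.all()
#       permission_classes = [IsStaffOrTargetUser]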
class IsAdminOrIsSelf(permissions.BasePermission):
def has_permission(self, request, view):
# allow user to list all users if logged in user is staff
return view.action == 'retrieve' or request.user.is_authenticated
def has_object_permission(self, request, view, obj):
# allow logged in user to view own details, allows staff to view all records
        return request.user.is_admin or (obj if isinstance(obj,User) else obj.user)== request.user
| 49.952381 | 98 | 0.734032 |
7dfd4403e2d091fc2fec6b8d8e0fb20d46807565 | 645 | py | Python | jobs/migrations/0014_auto_20161104_1732.py | kevinvargasp/my_proyecto | 2f1534cbcaf6fa50b52924dd494705d21cbdf0f6 | [
"Apache-2.0"
] | null | null | null | jobs/migrations/0014_auto_20161104_1732.py | kevinvargasp/my_proyecto | 2f1534cbcaf6fa50b52924dd494705d21cbdf0f6 | [
"Apache-2.0"
] | null | null | null | jobs/migrations/0014_auto_20161104_1732.py | kevinvargasp/my_proyecto | 2f1534cbcaf6fa50b52924dd494705d21cbdf0f6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-04 21:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0013_remove_profilejob_state'),
]
operations = [
migrations.AlterField(
model_name='job',
name='lat',
field=models.CharField(max_length=50, verbose_name=b'Latitud'),
),
migrations.AlterField(
model_name='job',
name='lng',
field=models.CharField(max_length=50, verbose_name=b'Longitud'),
),
]
| 24.807692 | 76 | 0.6 |
cbf946b518f6db39f2b8ee5142fecc6baa19550c | 308 | py | Python | modularwebapplication/conftest.py | ScastrillonE/modularwebapp | 2635bfbb6984d5966651010d547f4537ac6f6860 | [
"MIT"
] | null | null | null | modularwebapplication/conftest.py | ScastrillonE/modularwebapp | 2635bfbb6984d5966651010d547f4537ac6f6860 | [
"MIT"
] | 23 | 2021-12-23T06:24:05.000Z | 2022-03-31T06:27:16.000Z | modularwebapplication/conftest.py | ScastrillonE/modularwebapp | 2635bfbb6984d5966651010d547f4537ac6f6860 | [
"MIT"
] | null | null | null | import pytest
from modularwebapplication.users.models import User
from modularwebapplication.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
| 20.533333 | 67 | 0.795455 |
407ce5a7283ab7ee11bf52913607d54fadfe94b2 | 1,866 | py | Python | src/main/resources/meetup/Client.py | zvercodebender/xlr-meetup-plugin | 5043c8bb1248bed237d71e5183199c9160e8684c | [
"MIT"
] | null | null | null | src/main/resources/meetup/Client.py | zvercodebender/xlr-meetup-plugin | 5043c8bb1248bed237d71e5183199c9160e8684c | [
"MIT"
] | null | null | null | src/main/resources/meetup/Client.py | zvercodebender/xlr-meetup-plugin | 5043c8bb1248bed237d71e5183199c9160e8684c | [
"MIT"
] | null | null | null | #
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import random
import org.slf4j.Logger as Logger
import org.slf4j.LoggerFactory as LoggerFactory
class Client(object):
def __init__(self):
self.logger = LoggerFactory.getLogger("meetup.Client")
self.logger.error("meetup.Client Created ==================")
return
@staticmethod
def get_client():
return Client()
def meetup_randomnumbergenerator(self, variables):
self.minVal = int(variables['minValue'])
self.maxVal = int(variables['maxValue'])
self.logger.error("Min Value = %s" % self.minVal)
self.logger.error("Max Value = %s" % self.maxVal)
self.ranVal = random.randint( self.minVal, self.maxVal )
self.logger.error("Random Number = %s" % self.ranVal)
return {"output" : str(self.ranVal)}
| 58.3125 | 462 | 0.729368 |
59c6040a171753c18ca87faf0daeeaf6f609da66 | 4,968 | py | Python | vaetc/evaluation/metrics/do2020/entropy.py | ganmodokix/vaetc | 866b79677b4f06603203376d967989dedadbffae | [
"MIT"
] | null | null | null | vaetc/evaluation/metrics/do2020/entropy.py | ganmodokix/vaetc | 866b79677b4f06603203376d967989dedadbffae | [
"MIT"
] | null | null | null | vaetc/evaluation/metrics/do2020/entropy.py | ganmodokix/vaetc | 866b79677b4f06603203376d967989dedadbffae | [
"MIT"
] | null | null | null | import numpy as np
from scipy.special import logsumexp, erf, xlogy
def sample_gaussian(mean: np.ndarray, logvar: np.ndarray) -> np.ndarray:
noise = np.random.standard_normal(size=mean.shape)
z = mean + np.exp(logvar * 0.5) * noise
return z
def log_gaussian_density(
mean1: np.ndarray, logvar1: np.ndarray,
mean2: np.ndarray, logvar2: np.ndarray
):
"""
arguments:
mean1, logvar1: sampling batch outside the log with size (B1, L)
mean2, logvar2: sampling batch inside the log with size (B2, L)
returns log(z1|x2) with size (B1, B2, L)
"""
    z1 = sample_gaussian(mean1, logvar1)  # (B1, L)
    std2 = np.exp(logvar2 * 0.5)  # (B2, L)
    z1      = z1     [:,None,:]   # (B1, 1, L)
    mean2   = mean2  [None,:,:]   # (1, B2, L)
    std2    = std2   [None,:,:]   # (1, B2, L)
    logvar2 = logvar2[None,:,:]   # (1, B2, L)
    log2pi = np.log(np.pi * 2)
    # log N(z1; mean2, var2) per dimension: -0.5 * ((z1-mean2)^2/var2 + logvar2 + log(2*pi))
    core = ((z1 - mean2) / std2) ** 2 + logvar2 + log2pi
    return -0.5 * core
def entropy_sampling(mean: np.ndarray, logvar: np.ndarray) -> float:
"""
arguments:
mean, logvar: numpy.ndarray with shape (B, L)
returns float H(z)
"""
assert mean.ndim == 2
assert logvar.ndim == 2
assert mean.shape == logvar.shape
batch_size = mean.shape[0]
assert batch_size >= 2
split_size = batch_size // 2
mean1 = mean [:split_size]
logvar1 = logvar[:split_size]
mean2 = mean [split_size:]
logvar2 = logvar[split_size:]
size1 = split_size
size2 = batch_size - split_size
logcore = log_gaussian_density(mean1, logvar1, mean2, logvar2) # (B1, B2, L)
logcore = np.sum(logcore, axis=2) # (B1, B2)
entropy = logsumexp(logcore, axis=1) # (B1, )
entropy = entropy - np.log(size2) # (B1, )
entropy = -np.mean(entropy) # ()
entropy = float(entropy)
return entropy
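# A quick numerical sanity check (illustrative, not part of the original file):
# when every q(z|x_n) is a standard normal, the aggregate q(z) is standard
# normal too, so the estimate should be near 0.5 * L * log(2*pi*e).
if __name__ == "__main__":
    mean = np.zeros((1024, 4))
    logvar = np.zeros((1024, 4))
    print("estimated H(z):", entropy_sampling(mean, logvar))
    print("analytic H(z):", 0.5 * 4 * np.log(2.0 * np.pi * np.e))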
QUANTIZATION_RANGE = (-4, 4)
QUANTIZATION_BINS = 100
def entropy_quantization(mean: np.ndarray, logvar: np.ndarray, bins: int = QUANTIZATION_BINS):
"""
arguments:
mean: np.ndarray of mean of z_i with shape (B, L)
logvar: np.ndarray of logvar of z_i with shape (B, L)
returns H(z_i) with shape (L, )
Note that its sum is NOT the joint entropy of the entire z
"""
assert bins >= 1
zmin, zmax = QUANTIZATION_RANGE
poss = np.linspace(zmin, zmax, bins+1)
a = poss[None,None, :-1]
b = poss[None,None,1: ]
mean = mean [:,:,None]
logvar = logvar[:,:,None]
std = np.exp(logvar * 0.5)
sqrt2 = 2 ** 0.5
ca = (a - mean) / (std * sqrt2)
cb = (b - mean) / (std * sqrt2)
# qsin[n,i,j] = Q(s_j|x_n)
qsin = 0.5 * (erf(cb) - erf(ca)) # (B, L, bins)
# Q(s_j) = Mean of Q(s_j|x_n) w.r.t. n
qsi = np.mean(qsin, axis=0) # (L, bins)
hzi = np.sum(-xlogy(qsi, qsi), axis=1) # (L, )
return hzi
def entropy_histogram(z: np.ndarray, bins: int = QUANTIZATION_BINS):
"""
z: shape (B, L)
returns H(z) via quantization
"""
assert z.ndim == 2
batch_size, z_dim = z.shape
assert z_dim <= 2, "it suffers from curse of dimension"
eps = 1e-7
# zmin, zmax = np.min(z, axis=0)[None,:] - eps, np.max(z, axis=0)[None,:] + eps
# histogram = np.zeros(shape=(bins, ) * z_dim, dtype=int)
# zpos = ((z - zmin) / (zmax - zmin) * bins).astype(int)
# zpos = np.clip(zpos, 0, bins-1)
# for i in range(batch_size):
# histogram[tuple(zpos[i])] += 1
histogram, zposs = np.histogramdd(z, bins=bins)
zposs = np.stack(zposs)
histogram = histogram.astype(float)
prob = histogram / batch_size
darea = np.prod((zposs[:,-1] - zposs[:,0]) / bins)
ent = np.sum(-xlogy(prob, prob / darea))
return ent
def entropy_conditioned(logvar: np.ndarray) -> np.ndarray:
"""
H(z_i|x)
"""
return ((logvar + np.log(np.pi * 2) + 1) * 0.5).mean(axis=0)
def entropy_binary(binary: np.ndarray) -> np.ndarray:
batch_size, t_dim = binary.shape
p = np.count_nonzero(binary, axis=0) / batch_size
return -xlogy(p, p) - xlogy(1-p, 1-p)
def entropy_joint_binary(
mean: np.ndarray, logvar: np.ndarray,
binary: np.ndarray, threshold: float = 0.5
):
"""
via quantization.
returned[i][k] := H(z_i, y_k)
"""
assert mean.shape == logvar.shape
assert mean.shape[0] == binary.shape[0]
batch_size, z_dim = mean.shape
batch_size, t_dim = binary.shape
mask = (binary >= threshold).T # (t_dim, batch_size)
ent_joint = []
for k in range(t_dim):
n1 = np.count_nonzero(mask[k])
n0 = batch_size - n1
if min(n0, n1) == 0:
ent = entropy_quantization(mean, logvar)
ent_joint += [ent]
else:
ent1 = entropy_quantization(mean[mask[k]], logvar[mask[k]])
ent0 = entropy_quantization(mean[~mask[k]], logvar[~mask[k]])
ent_joint += [(ent0 * n0 + ent1 * n1) / (n0 + n1)]
ent_joint = np.stack(ent_joint, axis=1)
    return ent_joint
| 27 | 94 | 0.586151 |
c31f6f46801978a181e55c5be6caab6c84375dfb | 1,743 | py | Python | utils/config_helpers.py | cgiliberto/RESDOG | 5e2603251d8673a9360211b57a51177af63def17 | [
"MIT"
] | 1 | 2020-04-03T09:14:48.000Z | 2020-04-03T09:14:48.000Z | utils/config_helpers.py | cgiliberto/RESDOG | 5e2603251d8673a9360211b57a51177af63def17 | [
"MIT"
] | 15 | 2020-01-28T21:50:59.000Z | 2022-03-11T23:19:14.000Z | utils/config_helpers.py | cgiliberto/RESDOG | 5e2603251d8673a9360211b57a51177af63def17 | [
"MIT"
] | null | null | null | from easydict import EasyDict
import numpy as np
def merge_configs(config_list):
if config_list == None or len(config_list) == 0:
return None
base_config = config_list[0]
if type(base_config) is dict:
base_config = EasyDict(base_config)
if type(base_config) is not EasyDict:
print("The argument given to 'merge_configs' have to be of type dict or EasyDict.")
return None
for i in range(len(config_list) - 1):
config_to_merge = config_list[i+1]
if type(config_to_merge) is dict:
config_to_merge = EasyDict(config_to_merge)
_merge_add_a_into_b(config_to_merge, base_config)
return base_config
def _merge_add_a_into_b(a, b):
"""
Merge config dictionary a into config dictionary b,
clobbering the options in b whenever they are also specified in a.
New options that are only in a will be added to b.
"""
if type(a) is not EasyDict:
return
for k, v in a.items():
# if the key from a is new to b simply add it
if not k in b:
b[k] = v
continue
# the types must match
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) for config key: {}').format(type(b[k]), type(v), k))
# recursively merge dicts
if type(v) is EasyDict:
try:
_merge_add_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
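# A minimal sanity check (hypothetical config values, not part of the original
# module) showing that merge_configs layers later dicts over earlier ones:
# new keys are added and nested keys are overwritten recursively.
if __name__ == "__main__":
    base = {"train": {"lr": 0.1, "epochs": 10}}
    override = {"train": {"lr": 0.01}, "model": "resnet"}
    merged = merge_configs([base, override])
    assert merged.train.lr == 0.01
    assert merged.train.epochs == 10
    assert merged.model == "resnet"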
| 31.690909 | 114 | 0.561675 |
469c4b26e1fc82c3c75d828fe5614375f0049508 | 437 | py | Python | dashboard/internet_nl_dashboard/signals.py | bslavin/Internet.nl-dashboard | 5fd6d8fe8edb5f181727ddd1729697d9fc586c29 | [
"Apache-2.0"
] | null | null | null | dashboard/internet_nl_dashboard/signals.py | bslavin/Internet.nl-dashboard | 5fd6d8fe8edb5f181727ddd1729697d9fc586c29 | [
"Apache-2.0"
] | null | null | null | dashboard/internet_nl_dashboard/signals.py | bslavin/Internet.nl-dashboard | 5fd6d8fe8edb5f181727ddd1729697d9fc586c29 | [
"Apache-2.0"
] | null | null | null | from actstream import action
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.dispatch import receiver
@receiver(user_logged_in)
def stream_login(sender, **kwargs):
# sender = user
action.send(kwargs['user'], verb='logged in', public=False)
@receiver(user_logged_out)
def stream_logout(sender, **kwargs):
# sender = user
action.send(kwargs['user'], verb='logged out', public=False)
| 27.3125 | 71 | 0.748284 |
94bec13e2b71c5749a077a0e42182b44b083ecb8 | 347 | py | Python | tests/test_350_statics.py | manatlan/guy | c6fe47997dfdc314896eabb35f4bd35a0b9c50af | [
"Apache-2.0"
] | 194 | 2019-08-15T16:53:40.000Z | 2022-03-22T02:57:23.000Z | tests/test_350_statics.py | manatlan/guy | c6fe47997dfdc314896eabb35f4bd35a0b9c50af | [
"Apache-2.0"
] | 23 | 2019-11-24T12:31:41.000Z | 2021-04-05T16:37:42.000Z | tests/test_350_statics.py | manatlan/guy | c6fe47997dfdc314896eabb35f4bd35a0b9c50af | [
"Apache-2.0"
] | 24 | 2019-11-14T23:01:40.000Z | 2021-07-20T04:52:18.000Z | # import sys; sys.path.insert(0,"..")
from guy import Guy
def test_useStatic(runner):
class UseStatic(Guy):
classvar=45
def __init__(self,v):
self.instancevar=v
Guy.__init__(self)
def verif(self,a,b):
self.exit(a+b)
t=UseStatic(42)
total=runner(t)
    assert total == 87
| 19.277778 | 37 | 0.567723 |
aed2a7ed7694d10777650e61c675273549587b26 | 3,338 | py | Python | guild/model_proxy.py | guildai/guild-cli | d3db493fb7a4952a334684e36578dd4b18afa124 | [
"Apache-2.0"
] | 63 | 2016-11-01T13:06:46.000Z | 2018-08-21T08:38:36.000Z | guild/model_proxy.py | guildai/guild-cli | d3db493fb7a4952a334684e36578dd4b18afa124 | [
"Apache-2.0"
] | 28 | 2016-11-02T01:41:23.000Z | 2018-10-19T22:57:06.000Z | guild/model_proxy.py | guildai/guild-cli | d3db493fb7a4952a334684e36578dd4b18afa124 | [
"Apache-2.0"
] | 8 | 2017-01-15T14:58:43.000Z | 2018-07-27T11:51:39.000Z | # Copyright 2017-2022 RStudio, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import guild
from guild import guildfile
from guild import model as modellib
from guild import plugin as pluginlib
log = logging.getLogger("guild")
class NotSupported(Exception):
pass
class MissingRunOpdef(Exception):
pass
class OpSpecError(Exception):
pass
class BatchModelProxy:
name = ""
op_name = "+"
op_description = "Default batch processor."
module_name = "guild.batch_main"
flag_encoder = None
default_max_trials = None
delete_on_success = True
can_stage_trials = True
flags_data = {}
def __init__(self):
self.modeldef = self._init_modeldef()
self.reference = self._init_reference()
def _init_modeldef(self):
data = {
"operations": {
self.op_name: {
"description": self.op_description,
"exec": "${python_exe} -um %s" % self.module_name,
"flag-encoder": self.flag_encoder,
"default-max-trials": self.default_max_trials,
"flags": self.flags_data,
"env": {
"NO_OP_INTERRUPTED_MSG": "1",
},
"delete-on-success": self.delete_on_success,
"can-stage-trials": self.can_stage_trials,
}
}
}
return modeldef(self.name, data, "<%s>" % self.__class__.__name__)
def _init_reference(self):
return modellib.ModelRef("builtin", "guildai", guild.__version__, self.name)
def modeldef(model_name, model_data, src):
model_data = dict(model_data)
model_data["model"] = model_name
gf_data = [model_data]
gf = guildfile.Guildfile(gf_data, src=src)
return gf.default_model
def resolve_model_op(opspec):
if opspec == "+":
model = BatchModelProxy()
return model, model.op_name
return resolve_plugin_model_op(opspec)
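# A brief sketch (illustrative only) of the dispatch above: the "+" opspec maps
# to the built-in batch model proxy, while any other opspec is handed to the
# installed plugins and may raise NotSupported if none of them claims it, e.g.
#
#   model, op_name = resolve_model_op("+")
#   assert op_name == "+" and isinstance(model, BatchModelProxy)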
def resolve_plugin_model_op(opspec):
for name, plugin in _plugins_by_resolve_model_op_priority():
log.debug("resolving model op for %r with plugin %r", opspec, name)
try:
model_op = plugin.resolve_model_op(opspec)
except pluginlib.ModelOpResolutionError as e:
raise OpSpecError(e)
else:
if model_op:
log.debug(
"got model op for %r from plugin %r: %s:%s",
opspec,
name,
model_op[0].name,
model_op[1],
)
return model_op
raise NotSupported()
def _plugins_by_resolve_model_op_priority():
return sorted(
pluginlib.iter_plugins(), key=lambda x: x[1].resolve_model_op_priority
)
| 28.775862 | 84 | 0.616836 |
c69f0dff07d65e46ce74eba250c01697d1296e51 | 546 | py | Python | analysis/migrations/0027_auto_20210331_1002.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | analysis/migrations/0027_auto_20210331_1002.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | analysis/migrations/0027_auto_20210331_1002.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | # Generated by Django 3.1.3 on 2021-03-30 23:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analysis', '0026_auto_20210322_1506'),
]
operations = [
migrations.AlterField(
model_name='nodeallelefrequencyrange',
name='max',
field=models.FloatField(),
),
migrations.AlterField(
model_name='nodeallelefrequencyrange',
name='min',
field=models.FloatField(),
),
]
| 22.75 | 50 | 0.582418 |
d140dbcfa025406b1bfa7a502de8cdf16ecbb2f8 | 2,682 | py | Python | ping_client.py | sdwannfv/pyopenvpn | b7e4a9494abaf11bc4404b9c225683e7f55c7600 | [
"MIT"
] | null | null | null | ping_client.py | sdwannfv/pyopenvpn | b7e4a9494abaf11bc4404b9c225683e7f55c7600 | [
"MIT"
] | null | null | null | ping_client.py | sdwannfv/pyopenvpn | b7e4a9494abaf11bc4404b9c225683e7f55c7600 | [
"MIT"
] | null | null | null | #!/bin/python3
import logging
from argparse import ArgumentParser
from datetime import datetime, timedelta
from scapy.all import *
from pyopenvpn import Client, Settings
class PingClient:
def __init__(self, args):
self.host = args.host
self.interval = timedelta(seconds=args.interval)
self.timeout = timedelta(seconds=args.timeout)
self.count = args.count
print("Pinging %s..." % self.host)
self.pings = []
def __call__(self, client):
while True:
incoming = client.recv_data()
if not incoming:
break
if incoming.src != self.host:
continue
if not isinstance(incoming.payload, ICMP):
continue
in_icmp = incoming.payload
if in_icmp.type != 0:
continue
seq = in_icmp.seq
if seq >= len(self.pings):
continue
ttl = incoming.ttl
time = (datetime.now() - self.pings[seq]['time']).total_seconds() * 1000
self.pings[seq]['received'] = True
print('reply from %s: icmp_seq=%d ttl=%d time=%.1fms' %
(self.host, seq, ttl, time))
if self.count > 0 and len(self.pings) >= self.count:
client.stop()
return
if self.pings:
if (datetime.now() - self.pings[-1]['time']) > self.timeout \
and self.pings[-1]['received'] is None:
print('timeout')
self.pings[-1]['received'] = False
if self.count > 0 and len(self.pings) >= self.count:
client.stop()
return
if not self.pings or (datetime.now() - self.pings[-1]['time']) > self.interval:
p = IP(src=client.tunnel_ipv4, dst=self.host) / ICMP(seq=len(self.pings))
client.send_data(p)
self.pings.append({'time': datetime.now(), 'received': None})
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format="%(levelname)-5s:%(name)-8s: %(message)s")
parser = ArgumentParser()
parser.add_argument('config_file', help="OpenVPN configuration file")
parser.add_argument('host', help="Remote host to ping")
parser.add_argument('-i', dest='interval', default=1, metavar='interval', type=int)
parser.add_argument('-W', dest='timeout', default=5, metavar='timeout', type=int)
parser.add_argument('-c', dest='count', default=0, metavar='count', type=int)
args = parser.parse_args()
c = Client(Settings.from_file(args.config_file), PingClient(args))
c.run()
| 33.525 | 87 | 0.564504 |
1387adc628a1ce90c0bb7420923ce99196e0aa69 | 719 | py | Python | meetings/utils/html_template.py | githubliuyang777/app-meeting-server | 0775691545a33d4ad65c6a8329d34a63dc09ac8b | [
"Apache-2.0"
] | 1 | 2021-01-26T08:44:17.000Z | 2021-01-26T08:44:17.000Z | meetings/utils/html_template.py | githubliuyang777/app-meeting-server | 0775691545a33d4ad65c6a8329d34a63dc09ac8b | [
"Apache-2.0"
] | 6 | 2020-08-07T15:09:02.000Z | 2022-03-14T07:51:05.000Z | meetings/utils/html_template.py | githubliuyang777/app-meeting-server | 0775691545a33d4ad65c6a8329d34a63dc09ac8b | [
"Apache-2.0"
] | 10 | 2020-08-05T10:04:13.000Z | 2022-03-01T12:06:31.000Z | def cover_content(topic, group_name, date, start_time, end_time):
content = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>cover</title>
</head>
<body>
<div style="display: inline-block; height: 688px; width: 1024px; text-align: center; background-image: url('cover.png')">
<p style="font-size: 100px;margin-top: 150px; color: white"><b>{0}</b></p>
<p style="font-size: 80px; margin: 0; color: white">SIG: {1}</p>
<p style="font-size: 60px; margin: 0; color: white">Time: {2} {3}-{4}</p>
</div>
</body>
</html>
""".format(topic, group_name, date, start_time, end_time)
return content
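# A short usage sketch (topic, SIG name and times are made-up values): render
# the cover HTML and write it next to the expected 'cover.png' background.
if __name__ == "__main__":
    html = cover_content("Tech Meetup", "Infra", "2021-06-01", "10:00", "11:00")
    with open("cover.html", "w") as f:
        f.write(html)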
| 37.842105 | 129 | 0.568846 |
dcff8cd6e61d5d31698df84f0c38a0c524c876ea | 135 | py | Python | avatar/apps.py | johanneswilm/django-avatar | 348fade5a802eb7163c8f9684c824df7954d1286 | [
"BSD-3-Clause"
] | null | null | null | avatar/apps.py | johanneswilm/django-avatar | 348fade5a802eb7163c8f9684c824df7954d1286 | [
"BSD-3-Clause"
] | null | null | null | avatar/apps.py | johanneswilm/django-avatar | 348fade5a802eb7163c8f9684c824df7954d1286 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
class Config(AppConfig):
name = 'avatar'
default_auto_field = 'django.db.models.AutoField'
| 19.285714 | 53 | 0.740741 |
0f2974919dab42319abecb7aadc0491acc313ca3 | 11,455 | py | Python | perfkitbenchmarker/linux_packages/mutilate.py | doitintl/PerfKitBenchmarker | 9aa10195ab945025790f5280cdc2fcfa8073f6c5 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/mutilate.py | doitintl/PerfKitBenchmarker | 9aa10195ab945025790f5280cdc2fcfa8073f6c5 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/mutilate.py | doitintl/PerfKitBenchmarker | 9aa10195ab945025790f5280cdc2fcfa8073f6c5 | [
"Apache-2.0"
] | 1 | 2021-07-12T16:09:26.000Z | 2021-07-12T16:09:26.000Z | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing mutilate installation and cleanup functions."""
import logging
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
GIT_REPO = 'https://github.com/leverich/mutilate'
MUTILATE_DIR = '%s/mutilate_benchmark' % linux_packages.INSTALL_DIR
MUTILATE_BIN = '%s/mutilate' % MUTILATE_DIR
APT_PACKAGES = 'scons libevent-dev gengetopt libzmq-dev'
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'mutilate_protocol',
'binary', ['binary', 'ascii'],
'Protocol to use. Supported protocols are binary and ascii.')
flags.DEFINE_list(
'mutilate_qps', [],
'Target aggregate QPS. If not set, target for peak qps.')
flags.DEFINE_integer(
'mutilate_time', 300,
'Maximum time to run (seconds).')
flags.DEFINE_string(
'mutilate_keysize', '16',
'Length of memcached keys (distribution).')
flags.DEFINE_string(
'mutilate_valuesize', '128',
'Length of memcached values (distribution).')
flags.DEFINE_integer(
'mutilate_records', 10000,
'Number of memcached records to use.')
flags.DEFINE_float(
'mutilate_ratio', 0.0,
'Ratio of set:get. By default, read only.')
flags.DEFINE_list(
'mutilate_options', [],
'Additional mutilate long-form options (--) in comma separated form. e.g.'
'--mutilate_options=blocking,search=99:1000.'
'See https://github.com/leverich/mutilate for all available options.')
# If more than one value provided for threads, connections, depths, we will
# enumerate all test configurations. e.g.
# threads=1,2; connections=3,4; depths=5,6
# We will test following threads:connections:depths:
# 1,3,5; 1,3,6; 1,4,5; 1,4,6; 2,3,5; 2,3,6; 2,4,5; 2,4,6;
flags.DEFINE_list(
'mutilate_threads', [1],
'Number of total client threads to spawn per client VM.')
flags.DEFINE_list(
'mutilate_connections', [1],
'Number of connections to establish per client thread.')
flags.DEFINE_list(
'mutilate_depths', [1],
'Maximum depth to pipeline requests.')
# Agent mode options.
flags.DEFINE_integer(
'mutilate_measure_connections', None,
'Master client connections.')
flags.DEFINE_integer(
'mutilate_measure_threads', None,
'Master client thread count.')
flags.DEFINE_integer(
'mutilate_measure_qps', None,
'Master client QPS.')
flags.DEFINE_integer(
'mutilate_measure_depth', None,
'Master client connection depth.')
_INCREMENTAL_LOAD = flags.DEFINE_float(
'mutilate_incremental_load', None, 'Increments target qps until hits peak.')
# To use remote agent mode, we need at least 2 VMs.
AGENT_MODE_MIN_CLIENT_VMS = 2
def CheckPrerequisites():
"""Verify flags are correctly specified.
Raises:
errors.Setup.InvalidFlagConfigurationError: On invalid flag configurations.
"""
agent_mode_flags = [FLAGS['mutilate_measure_connections'].present,
FLAGS['mutilate_measure_threads'].present,
FLAGS['mutilate_measure_qps'].present,
FLAGS['mutilate_measure_depth'].present]
error_message = (
'To enable agent mode, set '
'memcached_mutilate_num_client_vms > 1.')
if any(agent_mode_flags) and (
FLAGS.memcached_mutilate_num_client_vms < AGENT_MODE_MIN_CLIENT_VMS):
raise errors.Setup.InvalidFlagConfigurationError(error_message)
if _INCREMENTAL_LOAD.value and (len(FLAGS.mutilate_qps) != 1 or
int(FLAGS.mutilate_qps[0]) == 0):
raise errors.Setup.InvalidFlagConfigurationError(
'To use dynamic load, set inital target qps with --mutilate_qps '
'and incremental with --mutilate_incremental_load.')
def YumInstall(vm):
"""Installs the mutilate package on the VM."""
raise NotImplementedError
def AptInstall(vm):
"""Installs the mutilate package on the VM."""
vm.Install('build_tools')
vm.InstallPackages(APT_PACKAGES)
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, MUTILATE_DIR))
vm.RemoteCommand(
f'sed -i "s|int total|long total|g" {MUTILATE_DIR}/mutilate.cc')
vm.RemoteCommand('cd {0} && sudo scons'.format(MUTILATE_DIR))
def GetMetadata():
"""Returns mutilate metadata."""
metadata = {
'protocol': FLAGS.mutilate_protocol,
'qps': FLAGS.mutilate_qps or 'peak',
'time': FLAGS.mutilate_time,
'keysize': FLAGS.mutilate_keysize,
'valuesize': FLAGS.mutilate_valuesize,
'records': FLAGS.mutilate_records,
'ratio': FLAGS.mutilate_ratio
}
if FLAGS.mutilate_options:
metadata['options'] = FLAGS.mutilate_options
return metadata
def BuildCmd(server_ip, server_port, num_instances, options):
"""Build base mutilate command in a list."""
server_ips = []
for idx in range(num_instances):
server_ips.append(f'--server={server_ip}:{server_port + idx}')
cmd = [
'ulimit -n 32768; ', MUTILATE_BIN,
'--keysize=%s' % FLAGS.mutilate_keysize,
'--valuesize=%s' % FLAGS.mutilate_valuesize,
'--records=%s' % FLAGS.mutilate_records,
'--roundrobin' if len(server_ips) > 1 else ''
] + server_ips + options
if FLAGS.mutilate_protocol == 'binary':
cmd.append('--binary')
return cmd
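# A rough sketch (server address is hypothetical; assumes default flag values)
# of what BuildCmd() returns for two memcached instances:
#
#   BuildCmd('10.0.0.2', 11211, 2, ['--noload', '--qps=1000'])
#   ==> ['ulimit -n 32768; ', MUTILATE_BIN,
#        '--keysize=16', '--valuesize=128', '--records=10000', '--roundrobin',
#        '--server=10.0.0.2:11211', '--server=10.0.0.2:11212',
#        '--noload', '--qps=1000', '--binary']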
def Load(client_vm, server_ip, server_port):
"""Preload the server with data."""
logging.info('Loading memcached server.')
cmd = BuildCmd(
server_ip, server_port, 1,
['--loadonly'])
client_vm.RemoteCommand(' '.join(cmd))
def RestartAgent(vm, threads):
logging.info('Restarting mutilate remote agent on %s', vm.internal_ip)
# Kill existing mutilate agent threads
vm.RemoteCommand('pkill -9 mutilate', ignore_failure=True)
# Make sure have enough file descriptor for the agent process.
vm.RemoteCommand(' '.join([
'ulimit -n 32768; '
'nohup', MUTILATE_BIN,
'--threads=%s' % threads, '--agentmode', '&> log', '&'
]))
def Run(vms, server_ip, server_port, num_instances):
"""Runs the mutilate benchmark on the vm."""
samples = []
master = vms[0]
runtime_options = {}
samples = []
measure_flags = []
additional_flags = ['--%s' % option for option in FLAGS.mutilate_options]
if FLAGS.mutilate_measure_connections:
runtime_options['measure_connections'] = FLAGS.mutilate_measure_connections
measure_flags.append(
'--measure_connections=%s' % FLAGS.mutilate_measure_connections)
if FLAGS.mutilate_measure_threads:
runtime_options['measure_threads'] = FLAGS.mutilate_measure_threads
if FLAGS.mutilate_measure_qps:
runtime_options['measure_qps'] = FLAGS.mutilate_measure_qps
measure_flags.append(
'--measure_qps=%s' % FLAGS.mutilate_measure_qps)
if FLAGS.mutilate_measure_depth:
runtime_options['measure_depth'] = FLAGS.mutilate_measure_depth
measure_flags.append(
'--measure_depth=%s' % FLAGS.mutilate_measure_depth)
for thread_count in FLAGS.mutilate_threads:
runtime_options['threads'] = thread_count
for vm in vms[1:]:
RestartAgent(vm, thread_count)
for connection_count in FLAGS.mutilate_connections:
runtime_options['connections'] = connection_count
for depth in FLAGS.mutilate_depths:
runtime_options['depth'] = depth
target_qps_list = FLAGS.mutilate_qps[:] or [0]
while True:
target_qps = int(target_qps_list[0])
runtime_options['qps'] = target_qps or 'peak'
remote_agents = ['--agent=%s' % vm.internal_ip for vm in vms[1:]]
cmd = BuildCmd(server_ip, server_port, num_instances, [
'--noload',
'--qps=%s' % target_qps,
'--time=%s' % FLAGS.mutilate_time,
'--update=%s' % FLAGS.mutilate_ratio,
'--threads=%s' % (FLAGS.mutilate_measure_threads or thread_count),
'--connections=%s' % connection_count,
'--depth=%s' % depth,
] + remote_agents + measure_flags + additional_flags)
try:
stdout, _, retcode = master.RemoteHostCommandWithReturnCode(
' '.join(cmd), timeout=FLAGS.mutilate_time * 2,
ignore_failure=True)
except errors.VmUtil.IssueCommandTimeoutError:
break
if retcode:
break
metadata = GetMetadata()
metadata.update(runtime_options)
run_samples, actual_qps = ParseResults(stdout, metadata)
samples.extend(run_samples)
if _INCREMENTAL_LOAD.value and (actual_qps / target_qps >
(1 - _INCREMENTAL_LOAD.value * 2)):
target_qps_list.append(
int(target_qps) * (1 + _INCREMENTAL_LOAD.value))
target_qps_list.pop(0)
if not target_qps_list:
break
return samples
LATENCY_HEADER_REGEX = r'#type([\s\w\d]*)\n'
LATENCY_REGEX = r'([\s\d\.]*)'
QPS_REGEX = r'Total QPS = ([\d\.]*)'
MISS_REGEX = r'Misses = \d+ \(([\d\.]*)%\)'
BANDWIDTH_REGEX = r'[\s\d]*bytes :\s*([\d\.]*) MB/s'
def ParseResults(result, metadata):
"""Parse mutilate result into samples.
Sample Output:
#type avg min 1st 5th 10th 90th 95th 99th
read 52.4 41.0 43.1 45.2 48.1 55.8 56.6 71.5
update 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
op_q 1.5 1.0 1.0 1.1 1.1 1.9 2.0 2.0
Total QPS = 18416.6 (92083 / 5.0s)
Misses = 0 (0.0%)
RX 22744501 bytes : 4.3 MB/s
TX 3315024 bytes : 0.6 MB/s
Args:
result: Text output of running mutilate benchmark.
metadata: metadata associated with the results.
Returns:
List of sample.Sample objects and actual qps.
"""
samples = []
if FLAGS.mutilate_ratio < 1.0:
# N/A for write only workloads.
misses = regex_util.ExtractGroup(MISS_REGEX, result)
metadata['miss_rate'] = float(misses)
latency_stats = regex_util.ExtractGroup(LATENCY_HEADER_REGEX, result).split()
# parse latency
for metric in ('read', 'update', 'op_q'):
latency_regex = metric + LATENCY_REGEX
latency_values = regex_util.ExtractGroup(latency_regex, result).split()
for idx, stat in enumerate(latency_stats):
if idx == len(latency_values):
logging.warning(
'Mutilate does not report %s latency for %s.', stat, metric)
break
samples.append(
sample.Sample(metric + '_' + stat,
float(latency_values[idx]),
'usec', metadata))
# parse bandwidth
for metric in ('TX', 'RX'):
bw_regex = metric + BANDWIDTH_REGEX
bw = regex_util.ExtractGroup(bw_regex, result)
samples.append(
sample.Sample(metric, float(bw), 'MB/s', metadata))
qps = regex_util.ExtractFloat(QPS_REGEX, result)
samples.append(sample.Sample('qps', qps, 'ops/s', metadata))
return samples, qps
| 35.685358 | 80 | 0.669926 |
56ce9f3df1d8834c2204c5641afdf9f569f6eed7 | 75,307 | py | Python | sdk/python/pulumi_aws/cloudtrail/trail.py | dmelo/pulumi-aws | dd1a08d1fb93bab0d046aa410ca660f05ca0a58c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-10T16:33:40.000Z | 2021-11-10T16:33:40.000Z | sdk/python/pulumi_aws/cloudtrail/trail.py | dmelo/pulumi-aws | dd1a08d1fb93bab0d046aa410ca660f05ca0a58c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cloudtrail/trail.py | dmelo/pulumi-aws | dd1a08d1fb93bab0d046aa410ca660f05ca0a58c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['TrailArgs', 'Trail']
@pulumi.input_type
class TrailArgs:
def __init__(__self__, *,
s3_bucket_name: pulumi.Input[str],
advanced_event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]]] = None,
cloud_watch_logs_group_arn: Optional[pulumi.Input[str]] = None,
cloud_watch_logs_role_arn: Optional[pulumi.Input[str]] = None,
enable_log_file_validation: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]]] = None,
include_global_service_events: Optional[pulumi.Input[bool]] = None,
insight_selectors: Optional[pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]]] = None,
is_multi_region_trail: Optional[pulumi.Input[bool]] = None,
is_organization_trail: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
s3_key_prefix: Optional[pulumi.Input[str]] = None,
sns_topic_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Trail resource.
:param pulumi.Input[str] s3_bucket_name: Name of the S3 bucket designated for publishing log files.
:param pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]] advanced_event_selectors: Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`.
:param pulumi.Input[str] cloud_watch_logs_group_arn: Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard.
:param pulumi.Input[str] cloud_watch_logs_role_arn: Role for the CloudWatch Logs endpoint to assume to write to a user’s log group.
:param pulumi.Input[bool] enable_log_file_validation: Whether log file integrity validation is enabled. Defaults to `false`.
:param pulumi.Input[bool] enable_logging: Enables logging for the trail. Defaults to `true`. Setting this to `false` will pause logging.
:param pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]] event_selectors: Specifies an event selector for enabling data event logging. Fields documented below. Please note the [CloudTrail limits](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) when configuring these. Conflicts with `advanced_event_selector`.
:param pulumi.Input[bool] include_global_service_events: Whether the trail is publishing events from global services such as IAM to the log files. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]] insight_selectors: Configuration block for identifying unusual operational activity. See details below.
:param pulumi.Input[bool] is_multi_region_trail: Whether the trail is created in the current region or in all regions. Defaults to `false`.
:param pulumi.Input[bool] is_organization_trail: Whether the trail is an AWS Organizations trail. Organization trails log events for the master account and all member accounts. Can only be created in the organization master account. Defaults to `false`.
:param pulumi.Input[str] kms_key_id: KMS key ARN to use to encrypt the logs delivered by CloudTrail.
:param pulumi.Input[str] name: Specifies the name of the advanced event selector.
:param pulumi.Input[str] s3_key_prefix: S3 key prefix that follows the name of the bucket you have designated for log file delivery.
:param pulumi.Input[str] sns_topic_name: Name of the Amazon SNS topic defined for notification of log file delivery.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the trail. If configured with provider defaultTags present, tags with matching keys will overwrite those defined at the provider-level.
"""
pulumi.set(__self__, "s3_bucket_name", s3_bucket_name)
if advanced_event_selectors is not None:
pulumi.set(__self__, "advanced_event_selectors", advanced_event_selectors)
if cloud_watch_logs_group_arn is not None:
pulumi.set(__self__, "cloud_watch_logs_group_arn", cloud_watch_logs_group_arn)
if cloud_watch_logs_role_arn is not None:
pulumi.set(__self__, "cloud_watch_logs_role_arn", cloud_watch_logs_role_arn)
if enable_log_file_validation is not None:
pulumi.set(__self__, "enable_log_file_validation", enable_log_file_validation)
if enable_logging is not None:
pulumi.set(__self__, "enable_logging", enable_logging)
if event_selectors is not None:
pulumi.set(__self__, "event_selectors", event_selectors)
if include_global_service_events is not None:
pulumi.set(__self__, "include_global_service_events", include_global_service_events)
if insight_selectors is not None:
pulumi.set(__self__, "insight_selectors", insight_selectors)
if is_multi_region_trail is not None:
pulumi.set(__self__, "is_multi_region_trail", is_multi_region_trail)
if is_organization_trail is not None:
pulumi.set(__self__, "is_organization_trail", is_organization_trail)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name is not None:
pulumi.set(__self__, "name", name)
if s3_key_prefix is not None:
pulumi.set(__self__, "s3_key_prefix", s3_key_prefix)
if sns_topic_name is not None:
pulumi.set(__self__, "sns_topic_name", sns_topic_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="s3BucketName")
def s3_bucket_name(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket designated for publishing log files.
"""
return pulumi.get(self, "s3_bucket_name")
@s3_bucket_name.setter
def s3_bucket_name(self, value: pulumi.Input[str]):
pulumi.set(self, "s3_bucket_name", value)
@property
@pulumi.getter(name="advancedEventSelectors")
def advanced_event_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]]]:
"""
Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`.
"""
return pulumi.get(self, "advanced_event_selectors")
@advanced_event_selectors.setter
def advanced_event_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]]]):
pulumi.set(self, "advanced_event_selectors", value)
@property
@pulumi.getter(name="cloudWatchLogsGroupArn")
def cloud_watch_logs_group_arn(self) -> Optional[pulumi.Input[str]]:
"""
Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard.
"""
return pulumi.get(self, "cloud_watch_logs_group_arn")
@cloud_watch_logs_group_arn.setter
def cloud_watch_logs_group_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_watch_logs_group_arn", value)
@property
@pulumi.getter(name="cloudWatchLogsRoleArn")
def cloud_watch_logs_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role for the CloudWatch Logs endpoint to assume to write to a user’s log group.
"""
return pulumi.get(self, "cloud_watch_logs_role_arn")
@cloud_watch_logs_role_arn.setter
def cloud_watch_logs_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_watch_logs_role_arn", value)
@property
@pulumi.getter(name="enableLogFileValidation")
def enable_log_file_validation(self) -> Optional[pulumi.Input[bool]]:
"""
Whether log file integrity validation is enabled. Defaults to `false`.
"""
return pulumi.get(self, "enable_log_file_validation")
@enable_log_file_validation.setter
def enable_log_file_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_log_file_validation", value)
@property
@pulumi.getter(name="enableLogging")
def enable_logging(self) -> Optional[pulumi.Input[bool]]:
"""
Enables logging for the trail. Defaults to `true`. Setting this to `false` will pause logging.
"""
return pulumi.get(self, "enable_logging")
@enable_logging.setter
def enable_logging(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_logging", value)
@property
@pulumi.getter(name="eventSelectors")
def event_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]]]:
"""
Specifies an event selector for enabling data event logging. Fields documented below. Please note the [CloudTrail limits](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) when configuring these. Conflicts with `advanced_event_selector`.
"""
return pulumi.get(self, "event_selectors")
@event_selectors.setter
def event_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]]]):
pulumi.set(self, "event_selectors", value)
@property
@pulumi.getter(name="includeGlobalServiceEvents")
def include_global_service_events(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the trail is publishing events from global services such as IAM to the log files. Defaults to `true`.
"""
return pulumi.get(self, "include_global_service_events")
@include_global_service_events.setter
def include_global_service_events(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_global_service_events", value)
@property
@pulumi.getter(name="insightSelectors")
def insight_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]]]:
"""
Configuration block for identifying unusual operational activity. See details below.
"""
return pulumi.get(self, "insight_selectors")
@insight_selectors.setter
def insight_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]]]):
pulumi.set(self, "insight_selectors", value)
@property
@pulumi.getter(name="isMultiRegionTrail")
def is_multi_region_trail(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the trail is created in the current region or in all regions. Defaults to `false`.
"""
return pulumi.get(self, "is_multi_region_trail")
@is_multi_region_trail.setter
def is_multi_region_trail(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_multi_region_trail", value)
@property
@pulumi.getter(name="isOrganizationTrail")
def is_organization_trail(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the trail is an AWS Organizations trail. Organization trails log events for the master account and all member accounts. Can only be created in the organization master account. Defaults to `false`.
"""
return pulumi.get(self, "is_organization_trail")
@is_organization_trail.setter
def is_organization_trail(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_organization_trail", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
KMS key ARN to use to encrypt the logs delivered by CloudTrail.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the advanced event selector.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="s3KeyPrefix")
def s3_key_prefix(self) -> Optional[pulumi.Input[str]]:
"""
S3 key prefix that follows the name of the bucket you have designated for log file delivery.
"""
return pulumi.get(self, "s3_key_prefix")
@s3_key_prefix.setter
def s3_key_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "s3_key_prefix", value)
@property
@pulumi.getter(name="snsTopicName")
def sns_topic_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Amazon SNS topic defined for notification of log file delivery.
"""
return pulumi.get(self, "sns_topic_name")
@sns_topic_name.setter
def sns_topic_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sns_topic_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to assign to the trail. If configured with provider defaultTags present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
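# A hedged usage sketch (bucket and resource names are hypothetical) of the
# Trail resource these arguments configure:
#
#   import pulumi_aws as aws
#
#   trail = aws.cloudtrail.Trail("example",
#       s3_bucket_name=example_bucket.id,
#       s3_key_prefix="prefix",
#       include_global_service_events=False)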
@pulumi.input_type
class _TrailState:
def __init__(__self__, *,
advanced_event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]]] = None,
arn: Optional[pulumi.Input[str]] = None,
cloud_watch_logs_group_arn: Optional[pulumi.Input[str]] = None,
cloud_watch_logs_role_arn: Optional[pulumi.Input[str]] = None,
enable_log_file_validation: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]]] = None,
home_region: Optional[pulumi.Input[str]] = None,
include_global_service_events: Optional[pulumi.Input[bool]] = None,
insight_selectors: Optional[pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]]] = None,
is_multi_region_trail: Optional[pulumi.Input[bool]] = None,
is_organization_trail: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
s3_bucket_name: Optional[pulumi.Input[str]] = None,
s3_key_prefix: Optional[pulumi.Input[str]] = None,
sns_topic_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Trail resources.
:param pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]] advanced_event_selectors: Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`.
:param pulumi.Input[str] arn: ARN of the trail.
:param pulumi.Input[str] cloud_watch_logs_group_arn: Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard.
:param pulumi.Input[str] cloud_watch_logs_role_arn: Role for the CloudWatch Logs endpoint to assume to write to a user’s log group.
:param pulumi.Input[bool] enable_log_file_validation: Whether log file integrity validation is enabled. Defaults to `false`.
:param pulumi.Input[bool] enable_logging: Enables logging for the trail. Defaults to `true`. Setting this to `false` will pause logging.
:param pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]] event_selectors: Specifies an event selector for enabling data event logging. Fields documented below. Please note the [CloudTrail limits](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) when configuring these. Conflicts with `advanced_event_selector`.
:param pulumi.Input[str] home_region: Region in which the trail was created.
:param pulumi.Input[bool] include_global_service_events: Whether the trail is publishing events from global services such as IAM to the log files. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]] insight_selectors: Configuration block for identifying unusual operational activity. See details below.
:param pulumi.Input[bool] is_multi_region_trail: Whether the trail is created in the current region or in all regions. Defaults to `false`.
:param pulumi.Input[bool] is_organization_trail: Whether the trail is an AWS Organizations trail. Organization trails log events for the master account and all member accounts. Can only be created in the organization master account. Defaults to `false`.
:param pulumi.Input[str] kms_key_id: KMS key ARN to use to encrypt the logs delivered by CloudTrail.
        :param pulumi.Input[str] name: Specifies the name of the trail.
:param pulumi.Input[str] s3_bucket_name: Name of the S3 bucket designated for publishing log files.
:param pulumi.Input[str] s3_key_prefix: S3 key prefix that follows the name of the bucket you have designated for log file delivery.
:param pulumi.Input[str] sns_topic_name: Name of the Amazon SNS topic defined for notification of log file delivery.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the trail. If configured with provider defaultTags present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: Map of tags assigned to the resource, including those inherited from the provider.
"""
if advanced_event_selectors is not None:
pulumi.set(__self__, "advanced_event_selectors", advanced_event_selectors)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if cloud_watch_logs_group_arn is not None:
pulumi.set(__self__, "cloud_watch_logs_group_arn", cloud_watch_logs_group_arn)
if cloud_watch_logs_role_arn is not None:
pulumi.set(__self__, "cloud_watch_logs_role_arn", cloud_watch_logs_role_arn)
if enable_log_file_validation is not None:
pulumi.set(__self__, "enable_log_file_validation", enable_log_file_validation)
if enable_logging is not None:
pulumi.set(__self__, "enable_logging", enable_logging)
if event_selectors is not None:
pulumi.set(__self__, "event_selectors", event_selectors)
if home_region is not None:
pulumi.set(__self__, "home_region", home_region)
if include_global_service_events is not None:
pulumi.set(__self__, "include_global_service_events", include_global_service_events)
if insight_selectors is not None:
pulumi.set(__self__, "insight_selectors", insight_selectors)
if is_multi_region_trail is not None:
pulumi.set(__self__, "is_multi_region_trail", is_multi_region_trail)
if is_organization_trail is not None:
pulumi.set(__self__, "is_organization_trail", is_organization_trail)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name is not None:
pulumi.set(__self__, "name", name)
if s3_bucket_name is not None:
pulumi.set(__self__, "s3_bucket_name", s3_bucket_name)
if s3_key_prefix is not None:
pulumi.set(__self__, "s3_key_prefix", s3_key_prefix)
if sns_topic_name is not None:
pulumi.set(__self__, "sns_topic_name", sns_topic_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter(name="advancedEventSelectors")
def advanced_event_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]]]:
"""
Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`.
"""
return pulumi.get(self, "advanced_event_selectors")
@advanced_event_selectors.setter
def advanced_event_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrailAdvancedEventSelectorArgs']]]]):
pulumi.set(self, "advanced_event_selectors", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the trail.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="cloudWatchLogsGroupArn")
def cloud_watch_logs_group_arn(self) -> Optional[pulumi.Input[str]]:
"""
Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard.
"""
return pulumi.get(self, "cloud_watch_logs_group_arn")
@cloud_watch_logs_group_arn.setter
def cloud_watch_logs_group_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_watch_logs_group_arn", value)
@property
@pulumi.getter(name="cloudWatchLogsRoleArn")
def cloud_watch_logs_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role for the CloudWatch Logs endpoint to assume to write to a user’s log group.
"""
return pulumi.get(self, "cloud_watch_logs_role_arn")
@cloud_watch_logs_role_arn.setter
def cloud_watch_logs_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_watch_logs_role_arn", value)
@property
@pulumi.getter(name="enableLogFileValidation")
def enable_log_file_validation(self) -> Optional[pulumi.Input[bool]]:
"""
Whether log file integrity validation is enabled. Defaults to `false`.
"""
return pulumi.get(self, "enable_log_file_validation")
@enable_log_file_validation.setter
def enable_log_file_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_log_file_validation", value)
@property
@pulumi.getter(name="enableLogging")
def enable_logging(self) -> Optional[pulumi.Input[bool]]:
"""
Enables logging for the trail. Defaults to `true`. Setting this to `false` will pause logging.
"""
return pulumi.get(self, "enable_logging")
@enable_logging.setter
def enable_logging(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_logging", value)
@property
@pulumi.getter(name="eventSelectors")
def event_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]]]:
"""
Specifies an event selector for enabling data event logging. Fields documented below. Please note the [CloudTrail limits](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) when configuring these. Conflicts with `advanced_event_selector`.
"""
return pulumi.get(self, "event_selectors")
@event_selectors.setter
def event_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrailEventSelectorArgs']]]]):
pulumi.set(self, "event_selectors", value)
@property
@pulumi.getter(name="homeRegion")
def home_region(self) -> Optional[pulumi.Input[str]]:
"""
Region in which the trail was created.
"""
return pulumi.get(self, "home_region")
@home_region.setter
def home_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "home_region", value)
@property
@pulumi.getter(name="includeGlobalServiceEvents")
def include_global_service_events(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the trail is publishing events from global services such as IAM to the log files. Defaults to `true`.
"""
return pulumi.get(self, "include_global_service_events")
@include_global_service_events.setter
def include_global_service_events(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_global_service_events", value)
@property
@pulumi.getter(name="insightSelectors")
def insight_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]]]:
"""
Configuration block for identifying unusual operational activity. See details below.
"""
return pulumi.get(self, "insight_selectors")
@insight_selectors.setter
def insight_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TrailInsightSelectorArgs']]]]):
pulumi.set(self, "insight_selectors", value)
@property
@pulumi.getter(name="isMultiRegionTrail")
def is_multi_region_trail(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the trail is created in the current region or in all regions. Defaults to `false`.
"""
return pulumi.get(self, "is_multi_region_trail")
@is_multi_region_trail.setter
def is_multi_region_trail(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_multi_region_trail", value)
@property
@pulumi.getter(name="isOrganizationTrail")
def is_organization_trail(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the trail is an AWS Organizations trail. Organization trails log events for the master account and all member accounts. Can only be created in the organization master account. Defaults to `false`.
"""
return pulumi.get(self, "is_organization_trail")
@is_organization_trail.setter
def is_organization_trail(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_organization_trail", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
KMS key ARN to use to encrypt the logs delivered by CloudTrail.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        Specifies the name of the trail.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="s3BucketName")
def s3_bucket_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the S3 bucket designated for publishing log files.
"""
return pulumi.get(self, "s3_bucket_name")
@s3_bucket_name.setter
def s3_bucket_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "s3_bucket_name", value)
@property
@pulumi.getter(name="s3KeyPrefix")
def s3_key_prefix(self) -> Optional[pulumi.Input[str]]:
"""
S3 key prefix that follows the name of the bucket you have designated for log file delivery.
"""
return pulumi.get(self, "s3_key_prefix")
@s3_key_prefix.setter
def s3_key_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "s3_key_prefix", value)
@property
@pulumi.getter(name="snsTopicName")
def sns_topic_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Amazon SNS topic defined for notification of log file delivery.
"""
return pulumi.get(self, "sns_topic_name")
@sns_topic_name.setter
def sns_topic_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sns_topic_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to assign to the trail. If configured with provider defaultTags present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class Trail(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
advanced_event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailAdvancedEventSelectorArgs']]]]] = None,
cloud_watch_logs_group_arn: Optional[pulumi.Input[str]] = None,
cloud_watch_logs_role_arn: Optional[pulumi.Input[str]] = None,
enable_log_file_validation: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailEventSelectorArgs']]]]] = None,
include_global_service_events: Optional[pulumi.Input[bool]] = None,
insight_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailInsightSelectorArgs']]]]] = None,
is_multi_region_trail: Optional[pulumi.Input[bool]] = None,
is_organization_trail: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
s3_bucket_name: Optional[pulumi.Input[str]] = None,
s3_key_prefix: Optional[pulumi.Input[str]] = None,
sns_topic_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides a CloudTrail resource.
> **Tip:** For a multi-region trail, this resource must be in the home region of the trail.
> **Tip:** For an organization trail, this resource must be in the master account of the organization.
## Example Usage
### Basic
Enable CloudTrail to capture all compatible management events in region.
For capturing events from services like IAM, `include_global_service_events` must be enabled.
```python
import pulumi
import pulumi_aws as aws
current = aws.get_caller_identity()
bucket = aws.s3.Bucket("bucket")
bucket_policy = aws.s3.BucketPolicy("bucketPolicy",
bucket=bucket.id,
policy=pulumi.Output.all(bucket.id, bucket.id).apply(lambda bucketId, bucketId1: f\"\"\" {{
"Version": "2012-10-17",
"Statement": [
{{
"Sid": "AWSCloudTrailAclCheck",
"Effect": "Allow",
"Principal": {{
"Service": "cloudtrail.amazonaws.com"
}},
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::{bucket_id}"
}},
{{
"Sid": "AWSCloudTrailWrite",
"Effect": "Allow",
"Principal": {{
"Service": "cloudtrail.amazonaws.com"
}},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::{bucket_id1}/prefix/AWSLogs/{current.account_id}/*",
"Condition": {{
"StringEquals": {{
"s3:x-amz-acl": "bucket-owner-full-control"
}}
}}
}}
]
}}
\"\"\"))
foobar = aws.cloudtrail.Trail("foobar",
s3_bucket_name=bucket.id,
s3_key_prefix="prefix",
include_global_service_events=False)
```
### Data Event Logging
CloudTrail can log [Data Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) for certain services such as S3 bucket objects and Lambda function invocations. Additional information about data event configuration can be found in the following links:
* [CloudTrail API DataResource documentation](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_DataResource.html) (for basic event selector).
* [CloudTrail API AdvancedFieldSelector documentation](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) (for advanced event selector).
### Logging All Lambda Function Invocations By Using Basic Event Selectors
```python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket")
example = aws.cloudtrail.Trail("example",
s3_bucket_name=bucket.id,
s3_key_prefix="prefix",
event_selectors=[aws.cloudtrail.TrailEventSelectorArgs(
read_write_type="All",
include_management_events=True,
data_resources=[aws.cloudtrail.TrailEventSelectorDataResourceArgs(
type="AWS::Lambda::Function",
values=["arn:aws:lambda"],
)],
)])
```
### Logging All S3 Bucket Object Events By Using Basic Event Selectors
```python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket")
example = aws.cloudtrail.Trail("example",
s3_bucket_name=bucket.id,
s3_key_prefix="prefix",
event_selectors=[aws.cloudtrail.TrailEventSelectorArgs(
read_write_type="All",
include_management_events=True,
data_resources=[aws.cloudtrail.TrailEventSelectorDataResourceArgs(
type="AWS::S3::Object",
values=["arn:aws:s3:::"],
)],
)])
```
### Logging Individual S3 Bucket Events By Using Basic Event Selectors
```python
import pulumi
import pulumi_aws as aws
important_bucket = aws.s3.get_bucket(bucket="important-bucket")
example = aws.cloudtrail.Trail("example",
s3_bucket_name=important_bucket.id,
s3_key_prefix="prefix",
event_selectors=[aws.cloudtrail.TrailEventSelectorArgs(
read_write_type="All",
include_management_events=True,
data_resources=[aws.cloudtrail.TrailEventSelectorDataResourceArgs(
type="AWS::S3::Object",
values=[f"{important_bucket.arn}/"],
)],
)])
```
### Logging All S3 Bucket Object Events Except For Two S3 Buckets By Using Advanced Event Selectors
```python
import pulumi
import pulumi_aws as aws
not_important_bucket_1 = aws.s3.get_bucket(bucket="not-important-bucket-1")
not_important_bucket_2 = aws.s3.get_bucket(bucket="not-important-bucket-2")
example = aws.cloudtrail.Trail("example", advanced_event_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Data"],
field="eventCategory",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
field="resources.ARN",
not_equals=[
f"{not_important_bucket_1.arn}/",
f"{not_important_bucket_2.arn}/",
],
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["AWS::S3::Object"],
field="resources.type",
),
],
name="Log all S3 buckets objects events except for two S3 buckets",
),
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Management"],
field="eventCategory",
)],
name="Log readOnly and writeOnly management events",
),
])
```
### Logging Individual S3 Buckets And Specific Event Names By Using Advanced Event Selectors
```python
import pulumi
import pulumi_aws as aws
important_bucket_1 = aws.s3.get_bucket(bucket="important-bucket-1")
important_bucket_2 = aws.s3.get_bucket(bucket="important-bucket-2")
important_bucket_3 = aws.s3.get_bucket(bucket="important-bucket-3")
example = aws.cloudtrail.Trail("example", advanced_event_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Data"],
field="eventCategory",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=[
"PutObject",
"DeleteObject",
],
field="eventName",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=[
f"{important_bucket_1.arn}/",
f"{important_bucket_2.arn}/",
],
field="resources.ARN",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["false"],
field="readOnly",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["AWS::S3::Object"],
field="resources.type",
),
],
name="Log PutObject and DeleteObject events for two S3 buckets",
),
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Data"],
field="eventCategory",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
field="eventName",
starts_with=["Delete"],
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=[f"{important_bucket_3.arn}/important-prefix"],
field="resources.ARN",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["false"],
field="readOnly",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["AWS::S3::Object"],
field="resources.type",
),
],
name="Log Delete* events for one S3 bucket",
),
])
```
### Sending Events to CloudWatch Logs
```python
import pulumi
import pulumi_aws as aws
current = aws.get_partition()
example_log_group = aws.cloudwatch.LogGroup("exampleLogGroup")
test_role = aws.iam.Role("testRole", assume_role_policy=f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Sid": "",
"Effect": "Allow",
"Principal": {{
"Service": "cloudtrail.{current.dns_suffix}"
}},
"Action": "sts:AssumeRole"
}}
]
}}
\"\"\")
test_role_policy = aws.iam.RolePolicy("testRolePolicy",
role=test_role.id,
policy=f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Sid": "AWSCloudTrailCreateLogStream",
"Effect": "Allow",
"Action": [
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "{aws_cloudwatch_log_group["test"]["arn"]}:*"
}}
]
}}
\"\"\")
bucket = aws.s3.Bucket("bucket")
example_trail = aws.cloudtrail.Trail("exampleTrail",
s3_bucket_name=data["aws_s3_bucket"]["important-bucket"]["id"],
s3_key_prefix="prefix",
cloud_watch_logs_role_arn=test_role.arn,
cloud_watch_logs_group_arn=example_log_group.arn.apply(lambda arn: f"{arn}:*"))
# CloudTrail requires the Log Stream wildcard
```
## Import
        CloudTrail trails can be imported using the `name`, e.g.,
```sh
$ pulumi import aws:cloudtrail/trail:Trail sample my-sample-trail
```
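        The import command only adopts the existing trail into the Pulumi state; a matching
        `Trail` declaration is still needed in the program. A minimal sketch (the bucket name
        below is a placeholder, not derived from the import):

        ```python
        import pulumi_aws as aws

        sample = aws.cloudtrail.Trail("sample", s3_bucket_name="my-existing-cloudtrail-bucket")
        ```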
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailAdvancedEventSelectorArgs']]]] advanced_event_selectors: Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`.
:param pulumi.Input[str] cloud_watch_logs_group_arn: Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard.
:param pulumi.Input[str] cloud_watch_logs_role_arn: Role for the CloudWatch Logs endpoint to assume to write to a user’s log group.
:param pulumi.Input[bool] enable_log_file_validation: Whether log file integrity validation is enabled. Defaults to `false`.
:param pulumi.Input[bool] enable_logging: Enables logging for the trail. Defaults to `true`. Setting this to `false` will pause logging.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailEventSelectorArgs']]]] event_selectors: Specifies an event selector for enabling data event logging. Fields documented below. Please note the [CloudTrail limits](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) when configuring these. Conflicts with `advanced_event_selector`.
:param pulumi.Input[bool] include_global_service_events: Whether the trail is publishing events from global services such as IAM to the log files. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailInsightSelectorArgs']]]] insight_selectors: Configuration block for identifying unusual operational activity. See details below.
:param pulumi.Input[bool] is_multi_region_trail: Whether the trail is created in the current region or in all regions. Defaults to `false`.
:param pulumi.Input[bool] is_organization_trail: Whether the trail is an AWS Organizations trail. Organization trails log events for the master account and all member accounts. Can only be created in the organization master account. Defaults to `false`.
:param pulumi.Input[str] kms_key_id: KMS key ARN to use to encrypt the logs delivered by CloudTrail.
        :param pulumi.Input[str] name: Specifies the name of the trail.
:param pulumi.Input[str] s3_bucket_name: Name of the S3 bucket designated for publishing log files.
:param pulumi.Input[str] s3_key_prefix: S3 key prefix that follows the name of the bucket you have designated for log file delivery.
:param pulumi.Input[str] sns_topic_name: Name of the Amazon SNS topic defined for notification of log file delivery.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the trail. If configured with provider defaultTags present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TrailArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a CloudTrail resource.
> **Tip:** For a multi-region trail, this resource must be in the home region of the trail.
> **Tip:** For an organization trail, this resource must be in the master account of the organization.
## Example Usage
### Basic
Enable CloudTrail to capture all compatible management events in region.
For capturing events from services like IAM, `include_global_service_events` must be enabled.
```python
import pulumi
import pulumi_aws as aws
current = aws.get_caller_identity()
bucket = aws.s3.Bucket("bucket")
bucket_policy = aws.s3.BucketPolicy("bucketPolicy",
bucket=bucket.id,
policy=pulumi.Output.all(bucket.id, bucket.id).apply(lambda bucketId, bucketId1: f\"\"\" {{
"Version": "2012-10-17",
"Statement": [
{{
"Sid": "AWSCloudTrailAclCheck",
"Effect": "Allow",
"Principal": {{
"Service": "cloudtrail.amazonaws.com"
}},
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::{bucket_id}"
}},
{{
"Sid": "AWSCloudTrailWrite",
"Effect": "Allow",
"Principal": {{
"Service": "cloudtrail.amazonaws.com"
}},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::{bucket_id1}/prefix/AWSLogs/{current.account_id}/*",
"Condition": {{
"StringEquals": {{
"s3:x-amz-acl": "bucket-owner-full-control"
}}
}}
}}
]
}}
\"\"\"))
foobar = aws.cloudtrail.Trail("foobar",
s3_bucket_name=bucket.id,
s3_key_prefix="prefix",
include_global_service_events=False)
```
### Data Event Logging
CloudTrail can log [Data Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) for certain services such as S3 bucket objects and Lambda function invocations. Additional information about data event configuration can be found in the following links:
* [CloudTrail API DataResource documentation](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_DataResource.html) (for basic event selector).
* [CloudTrail API AdvancedFieldSelector documentation](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) (for advanced event selector).
### Logging All Lambda Function Invocations By Using Basic Event Selectors
```python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket")
example = aws.cloudtrail.Trail("example",
s3_bucket_name=bucket.id,
s3_key_prefix="prefix",
event_selectors=[aws.cloudtrail.TrailEventSelectorArgs(
read_write_type="All",
include_management_events=True,
data_resources=[aws.cloudtrail.TrailEventSelectorDataResourceArgs(
type="AWS::Lambda::Function",
values=["arn:aws:lambda"],
)],
)])
```
### Logging All S3 Bucket Object Events By Using Basic Event Selectors
```python
import pulumi
import pulumi_aws as aws
bucket = aws.s3.Bucket("bucket")
example = aws.cloudtrail.Trail("example",
s3_bucket_name=bucket.id,
s3_key_prefix="prefix",
event_selectors=[aws.cloudtrail.TrailEventSelectorArgs(
read_write_type="All",
include_management_events=True,
data_resources=[aws.cloudtrail.TrailEventSelectorDataResourceArgs(
type="AWS::S3::Object",
values=["arn:aws:s3:::"],
)],
)])
```
### Logging Individual S3 Bucket Events By Using Basic Event Selectors
```python
import pulumi
import pulumi_aws as aws
important_bucket = aws.s3.get_bucket(bucket="important-bucket")
example = aws.cloudtrail.Trail("example",
s3_bucket_name=important_bucket.id,
s3_key_prefix="prefix",
event_selectors=[aws.cloudtrail.TrailEventSelectorArgs(
read_write_type="All",
include_management_events=True,
data_resources=[aws.cloudtrail.TrailEventSelectorDataResourceArgs(
type="AWS::S3::Object",
values=[f"{important_bucket.arn}/"],
)],
)])
```
### Logging All S3 Bucket Object Events Except For Two S3 Buckets By Using Advanced Event Selectors
```python
import pulumi
import pulumi_aws as aws
not_important_bucket_1 = aws.s3.get_bucket(bucket="not-important-bucket-1")
not_important_bucket_2 = aws.s3.get_bucket(bucket="not-important-bucket-2")
example = aws.cloudtrail.Trail("example", advanced_event_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Data"],
field="eventCategory",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
field="resources.ARN",
not_equals=[
f"{not_important_bucket_1.arn}/",
f"{not_important_bucket_2.arn}/",
],
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["AWS::S3::Object"],
field="resources.type",
),
],
name="Log all S3 buckets objects events except for two S3 buckets",
),
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Management"],
field="eventCategory",
)],
name="Log readOnly and writeOnly management events",
),
])
```
### Logging Individual S3 Buckets And Specific Event Names By Using Advanced Event Selectors
```python
import pulumi
import pulumi_aws as aws
important_bucket_1 = aws.s3.get_bucket(bucket="important-bucket-1")
important_bucket_2 = aws.s3.get_bucket(bucket="important-bucket-2")
important_bucket_3 = aws.s3.get_bucket(bucket="important-bucket-3")
example = aws.cloudtrail.Trail("example", advanced_event_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Data"],
field="eventCategory",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=[
"PutObject",
"DeleteObject",
],
field="eventName",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=[
f"{important_bucket_1.arn}/",
f"{important_bucket_2.arn}/",
],
field="resources.ARN",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["false"],
field="readOnly",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["AWS::S3::Object"],
field="resources.type",
),
],
name="Log PutObject and DeleteObject events for two S3 buckets",
),
aws.cloudtrail.TrailAdvancedEventSelectorArgs(
field_selectors=[
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["Data"],
field="eventCategory",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
field="eventName",
starts_with=["Delete"],
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=[f"{important_bucket_3.arn}/important-prefix"],
field="resources.ARN",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["false"],
field="readOnly",
),
aws.cloudtrail.TrailAdvancedEventSelectorFieldSelectorArgs(
equals=["AWS::S3::Object"],
field="resources.type",
),
],
name="Log Delete* events for one S3 bucket",
),
])
```
### Sending Events to CloudWatch Logs
```python
import pulumi
import pulumi_aws as aws
current = aws.get_partition()
example_log_group = aws.cloudwatch.LogGroup("exampleLogGroup")
test_role = aws.iam.Role("testRole", assume_role_policy=f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Sid": "",
"Effect": "Allow",
"Principal": {{
"Service": "cloudtrail.{current.dns_suffix}"
}},
"Action": "sts:AssumeRole"
}}
]
}}
\"\"\")
test_role_policy = aws.iam.RolePolicy("testRolePolicy",
role=test_role.id,
policy=f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Sid": "AWSCloudTrailCreateLogStream",
"Effect": "Allow",
"Action": [
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "{aws_cloudwatch_log_group["test"]["arn"]}:*"
}}
]
}}
\"\"\")
bucket = aws.s3.Bucket("bucket")
example_trail = aws.cloudtrail.Trail("exampleTrail",
s3_bucket_name=data["aws_s3_bucket"]["important-bucket"]["id"],
s3_key_prefix="prefix",
cloud_watch_logs_role_arn=test_role.arn,
cloud_watch_logs_group_arn=example_log_group.arn.apply(lambda arn: f"{arn}:*"))
# CloudTrail requires the Log Stream wildcard
```
## Import
        CloudTrail trails can be imported using the `name`, e.g.,
```sh
$ pulumi import aws:cloudtrail/trail:Trail sample my-sample-trail
```
:param str resource_name: The name of the resource.
:param TrailArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TrailArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
advanced_event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailAdvancedEventSelectorArgs']]]]] = None,
cloud_watch_logs_group_arn: Optional[pulumi.Input[str]] = None,
cloud_watch_logs_role_arn: Optional[pulumi.Input[str]] = None,
enable_log_file_validation: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailEventSelectorArgs']]]]] = None,
include_global_service_events: Optional[pulumi.Input[bool]] = None,
insight_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailInsightSelectorArgs']]]]] = None,
is_multi_region_trail: Optional[pulumi.Input[bool]] = None,
is_organization_trail: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
s3_bucket_name: Optional[pulumi.Input[str]] = None,
s3_key_prefix: Optional[pulumi.Input[str]] = None,
sns_topic_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TrailArgs.__new__(TrailArgs)
__props__.__dict__["advanced_event_selectors"] = advanced_event_selectors
__props__.__dict__["cloud_watch_logs_group_arn"] = cloud_watch_logs_group_arn
__props__.__dict__["cloud_watch_logs_role_arn"] = cloud_watch_logs_role_arn
__props__.__dict__["enable_log_file_validation"] = enable_log_file_validation
__props__.__dict__["enable_logging"] = enable_logging
__props__.__dict__["event_selectors"] = event_selectors
__props__.__dict__["include_global_service_events"] = include_global_service_events
__props__.__dict__["insight_selectors"] = insight_selectors
__props__.__dict__["is_multi_region_trail"] = is_multi_region_trail
__props__.__dict__["is_organization_trail"] = is_organization_trail
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["name"] = name
if s3_bucket_name is None and not opts.urn:
raise TypeError("Missing required property 's3_bucket_name'")
__props__.__dict__["s3_bucket_name"] = s3_bucket_name
__props__.__dict__["s3_key_prefix"] = s3_key_prefix
__props__.__dict__["sns_topic_name"] = sns_topic_name
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["home_region"] = None
__props__.__dict__["tags_all"] = None
super(Trail, __self__).__init__(
'aws:cloudtrail/trail:Trail',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
advanced_event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailAdvancedEventSelectorArgs']]]]] = None,
arn: Optional[pulumi.Input[str]] = None,
cloud_watch_logs_group_arn: Optional[pulumi.Input[str]] = None,
cloud_watch_logs_role_arn: Optional[pulumi.Input[str]] = None,
enable_log_file_validation: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
event_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailEventSelectorArgs']]]]] = None,
home_region: Optional[pulumi.Input[str]] = None,
include_global_service_events: Optional[pulumi.Input[bool]] = None,
insight_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailInsightSelectorArgs']]]]] = None,
is_multi_region_trail: Optional[pulumi.Input[bool]] = None,
is_organization_trail: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
s3_bucket_name: Optional[pulumi.Input[str]] = None,
s3_key_prefix: Optional[pulumi.Input[str]] = None,
sns_topic_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Trail':
"""
Get an existing Trail resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
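        Example (hypothetical; per the import documentation above, CloudTrail uses the
        trail name as the resource ID):

        ```python
        import pulumi
        import pulumi_aws as aws

        existing = aws.cloudtrail.Trail.get("existing-trail", "my-sample-trail")
        pulumi.export("trailArn", existing.arn)
        ```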
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailAdvancedEventSelectorArgs']]]] advanced_event_selectors: Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`.
:param pulumi.Input[str] arn: ARN of the trail.
:param pulumi.Input[str] cloud_watch_logs_group_arn: Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard.
:param pulumi.Input[str] cloud_watch_logs_role_arn: Role for the CloudWatch Logs endpoint to assume to write to a user’s log group.
:param pulumi.Input[bool] enable_log_file_validation: Whether log file integrity validation is enabled. Defaults to `false`.
:param pulumi.Input[bool] enable_logging: Enables logging for the trail. Defaults to `true`. Setting this to `false` will pause logging.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailEventSelectorArgs']]]] event_selectors: Specifies an event selector for enabling data event logging. Fields documented below. Please note the [CloudTrail limits](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) when configuring these. Conflicts with `advanced_event_selector`.
:param pulumi.Input[str] home_region: Region in which the trail was created.
:param pulumi.Input[bool] include_global_service_events: Whether the trail is publishing events from global services such as IAM to the log files. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrailInsightSelectorArgs']]]] insight_selectors: Configuration block for identifying unusual operational activity. See details below.
:param pulumi.Input[bool] is_multi_region_trail: Whether the trail is created in the current region or in all regions. Defaults to `false`.
:param pulumi.Input[bool] is_organization_trail: Whether the trail is an AWS Organizations trail. Organization trails log events for the master account and all member accounts. Can only be created in the organization master account. Defaults to `false`.
:param pulumi.Input[str] kms_key_id: KMS key ARN to use to encrypt the logs delivered by CloudTrail.
        :param pulumi.Input[str] name: Specifies the name of the trail.
:param pulumi.Input[str] s3_bucket_name: Name of the S3 bucket designated for publishing log files.
:param pulumi.Input[str] s3_key_prefix: S3 key prefix that follows the name of the bucket you have designated for log file delivery.
:param pulumi.Input[str] sns_topic_name: Name of the Amazon SNS topic defined for notification of log file delivery.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the trail. If configured with provider defaultTags present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: Map of tags assigned to the resource, including those inherited from the provider.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TrailState.__new__(_TrailState)
__props__.__dict__["advanced_event_selectors"] = advanced_event_selectors
__props__.__dict__["arn"] = arn
__props__.__dict__["cloud_watch_logs_group_arn"] = cloud_watch_logs_group_arn
__props__.__dict__["cloud_watch_logs_role_arn"] = cloud_watch_logs_role_arn
__props__.__dict__["enable_log_file_validation"] = enable_log_file_validation
__props__.__dict__["enable_logging"] = enable_logging
__props__.__dict__["event_selectors"] = event_selectors
__props__.__dict__["home_region"] = home_region
__props__.__dict__["include_global_service_events"] = include_global_service_events
__props__.__dict__["insight_selectors"] = insight_selectors
__props__.__dict__["is_multi_region_trail"] = is_multi_region_trail
__props__.__dict__["is_organization_trail"] = is_organization_trail
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["name"] = name
__props__.__dict__["s3_bucket_name"] = s3_bucket_name
__props__.__dict__["s3_key_prefix"] = s3_key_prefix
__props__.__dict__["sns_topic_name"] = sns_topic_name
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return Trail(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="advancedEventSelectors")
def advanced_event_selectors(self) -> pulumi.Output[Optional[Sequence['outputs.TrailAdvancedEventSelector']]]:
"""
Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`.
"""
return pulumi.get(self, "advanced_event_selectors")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
ARN of the trail.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="cloudWatchLogsGroupArn")
def cloud_watch_logs_group_arn(self) -> pulumi.Output[Optional[str]]:
"""
Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard.
"""
return pulumi.get(self, "cloud_watch_logs_group_arn")
@property
@pulumi.getter(name="cloudWatchLogsRoleArn")
def cloud_watch_logs_role_arn(self) -> pulumi.Output[Optional[str]]:
"""
Role for the CloudWatch Logs endpoint to assume to write to a user’s log group.
"""
return pulumi.get(self, "cloud_watch_logs_role_arn")
@property
@pulumi.getter(name="enableLogFileValidation")
def enable_log_file_validation(self) -> pulumi.Output[Optional[bool]]:
"""
Whether log file integrity validation is enabled. Defaults to `false`.
"""
return pulumi.get(self, "enable_log_file_validation")
@property
@pulumi.getter(name="enableLogging")
def enable_logging(self) -> pulumi.Output[Optional[bool]]:
"""
Enables logging for the trail. Defaults to `true`. Setting this to `false` will pause logging.
"""
return pulumi.get(self, "enable_logging")
@property
@pulumi.getter(name="eventSelectors")
def event_selectors(self) -> pulumi.Output[Optional[Sequence['outputs.TrailEventSelector']]]:
"""
Specifies an event selector for enabling data event logging. Fields documented below. Please note the [CloudTrail limits](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) when configuring these. Conflicts with `advanced_event_selector`.
"""
return pulumi.get(self, "event_selectors")
@property
@pulumi.getter(name="homeRegion")
def home_region(self) -> pulumi.Output[str]:
"""
Region in which the trail was created.
"""
return pulumi.get(self, "home_region")
@property
@pulumi.getter(name="includeGlobalServiceEvents")
def include_global_service_events(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the trail is publishing events from global services such as IAM to the log files. Defaults to `true`.
"""
return pulumi.get(self, "include_global_service_events")
@property
@pulumi.getter(name="insightSelectors")
def insight_selectors(self) -> pulumi.Output[Optional[Sequence['outputs.TrailInsightSelector']]]:
"""
Configuration block for identifying unusual operational activity. See details below.
"""
return pulumi.get(self, "insight_selectors")
@property
@pulumi.getter(name="isMultiRegionTrail")
def is_multi_region_trail(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the trail is created in the current region or in all regions. Defaults to `false`.
"""
return pulumi.get(self, "is_multi_region_trail")
@property
@pulumi.getter(name="isOrganizationTrail")
def is_organization_trail(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the trail is an AWS Organizations trail. Organization trails log events for the master account and all member accounts. Can only be created in the organization master account. Defaults to `false`.
"""
return pulumi.get(self, "is_organization_trail")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[Optional[str]]:
"""
KMS key ARN to use to encrypt the logs delivered by CloudTrail.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
        Specifies the name of the trail.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="s3BucketName")
def s3_bucket_name(self) -> pulumi.Output[str]:
"""
Name of the S3 bucket designated for publishing log files.
"""
return pulumi.get(self, "s3_bucket_name")
@property
@pulumi.getter(name="s3KeyPrefix")
def s3_key_prefix(self) -> pulumi.Output[Optional[str]]:
"""
S3 key prefix that follows the name of the bucket you have designated for log file delivery.
"""
return pulumi.get(self, "s3_key_prefix")
@property
@pulumi.getter(name="snsTopicName")
def sns_topic_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the Amazon SNS topic defined for notification of log file delivery.
"""
return pulumi.get(self, "sns_topic_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Map of tags to assign to the trail. If configured with provider defaultTags present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
Map of tags assigned to the resource, including those inherited from the provider.
"""
        return pulumi.get(self, "tags_all")


# ---- file: src/Utils/Scheduling/_private/_macos/__init__.py | repo: schmouk/ArcheryVideoTraining | license: MIT ----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Philippe Schmouker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
from .._scheduler_base import _SchedulerBase
#=============================================================================
class Scheduler( _SchedulerBase):
    """The class of the macOS scheduler.
"""
#-------------------------------------------------------------------------
def restore_slice_duration(self) -> None:
'''Restores the previous time slice duration.
This method must be called at the end of any use of the
modified scheduler time slice duration. It is the
counterpart of method 'set_slice_duration()'. The best
way to ensure proper use of both methods is to use
schedulers with Python statement 'with'.
In this base class, this method does nothing. Inherit-
ing classes SHOULD overwrite this method. They can get
access to protected attribute '._slice_duration' set
on call to method 'set_slice_duration()'.
'''
...
super().restore_slice_duration()
#-------------------------------------------------------------------------
def set_thread_background(self, bg: bool = True) -> None:
'''Sets the background status of the currently active thread.
Should be overwritten in inheriting classes, accord-
ing to the underlying OS platform.
In this base class, does nothing.
Args:
bg: bool
                Set this to True to put the currently active
thread running in background. Set this to
False to restore the currently active thread
initial running status. Defaults to True
(i.e. put thread in background).
'''
...
super().set_thread_background( bg )
#-------------------------------------------------------------------------
def set_thread_priority(self, priority_offset: int) -> None:
'''Modifies the priority level of the currently active thread.
        Must be overwritten in inheriting classes, according to the
underlying OS platform.
Args:
priority_offset: int
The relative offset applied to the priority level
of the currently active thread. See predefined
values at the end of this base class definition.
'''
...
super().set_thread_priority( priority_offset )
#-------------------------------------------------------------------------
def _platform_clipped(self, slice_duration_ms ) -> int:
'''Returns a clipped value for the passed argument.
This method should be overwritten in inheriting
        classes, as long as the min and max values for time
        slices can be queried from the underlying OS
platform.
        In this base class, returns the passed value without
        any clipping other than enforcing a minimal time slice of 1 ms.
Returns:
A time slice duration, expressed in milliseconds,
that is clipped according to min and max values
for time slices according to the underlying OS
platform.
'''
...
super()._platform_clipped( slice_duration_ms )
#-------------------------------------------------------------------------
def _set_slice_duration(self, slice_duration_ms: int) -> None:
'''Modifies the time slices duration.
This is the protected part of the implementation that
relates to the underlying OS platform. It embeds all
the code that is dedicated to the OS platform.
It must be implemented in inheriting classes.
Args:
slice_duration_ms: int
The new duration of time slices to be set for
the OS platform scheduler, expressed as integer
milliseconds. When they are accessible the OS
platform min and max values are used to clip
the passed duration value. If they are not
accessible with the underlying OS platform, it
is the responsibility of the caller to ensure
proper value for the passed argument.
'''
...
super()._set_slice_duration( slice_duration_ms )
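# Minimal usage sketch (illustrative only; the constructor and 'set_slice_duration' are
# assumed to be provided by _SchedulerBase, as referenced in the docstrings above):
#
#     scheduler = Scheduler()
#     scheduler.set_slice_duration(1)         # request ~1 ms scheduler time slices
#     try:
#         pass                                # time-critical work here
#     finally:
#         scheduler.restore_slice_duration()  # always restore the previous duration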
#===== end of src.Utils.Scheduling._private._macos.__init__ =====#


# ---- file: ucsmsdk/mometa/fabric/FabricFcEstcCloud.py | repo: Kego/ucsmsdk | license: Apache-2.0 ----
"""This module contains the general information for FabricFcEstcCloud ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FabricFcEstcCloudConsts:
pass
class FabricFcEstcCloud(ManagedObject):
"""This is FabricFcEstcCloud class."""
consts = FabricFcEstcCloudConsts()
naming_props = set([])
mo_meta = MoMeta("FabricFcEstcCloud", "fabricFcEstcCloud", "fc-estc", VersionMeta.Version141i, "InputOutput", 0x1f, [], ["admin", "ext-san-config", "ext-san-policy"], ['fabricEp'], ['fabricBHVlan', 'fabricFcEstc', 'fabricFcZoneProfile', 'fabricVsan', 'statsThresholdPolicy'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.sacl = None
self.status = None
        ManagedObject.__init__(self, "FabricFcEstcCloud", parent_mo_or_dn, **kwargs)


# ---- file: metaedia.py | repo: TheRealVira/metaedia | license: MIT ----
import requests, time, colorama, argparse, re
colorama.init()
GREEN = colorama.Fore.GREEN
RED = colorama.Fore.RED
RESET = colorama.Fore.RESET
discovered_urls = set()
wiki_source = ""
wiki_prefix = ""
start_time = time.time()
url_journey = []
HTML_TAG_REGEX = re.compile(r"<a[^<>] ?href=([\'\"])(.*?)\1", re.IGNORECASE)
def get_links(url, target):
links = []
found = None
for a_tag in HTML_TAG_REGEX.findall(requests.get(url).text):
href = f"{wiki_source}{a_tag[1]}"
if (
href in discovered_urls
or ":" in a_tag[1]
or not a_tag[1].startswith(wiki_prefix)
):
continue
discovered_urls.add(href)
links.append(href)
if href == target:
found = href
break
return (found, links)
def crawl(url, target, max_urls):
currentLinkslist = [url]
for i in range(1, max_urls):
links = []
lastlink = ""
for link in currentLinkslist:
(found, links) = get_links(link, target)
lastlink = link
if found is not None:
url_journey.append(lastlink)
return True
currentLinkslist = links
url_journey.append(lastlink)
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Wikipedia crawler that will find a connection between article A and B."
)
parser.add_argument("article_a", help="Article name A.")
parser.add_argument("article_b", help="Article name B.")
parser.add_argument(
"-w",
"--wiki-source",
help="Wikipedia base URL.",
default="https://de.wikipedia.org",
type=str,
)
parser.add_argument(
"-p",
"--wiki-prefix",
help="Wikipedia article prefix.",
default="/wiki/",
type=str,
)
parser.add_argument(
"-m", "--max-urls", help="Number of max URLs to crawl.", default=50, type=int
)
args = parser.parse_args()
wiki_source = args.wiki_source
wiki_prefix = args.wiki_prefix
article_a = "".join([wiki_source, wiki_prefix, args.article_a])
article_b = "".join([wiki_source, wiki_prefix, args.article_b])
max_urls = args.max_urls
discovered_urls.add(article_a)
if crawl(article_a, article_b, max_urls):
print(f"{GREEN}[!] Article connection found! {RESET}")
for url in url_journey:
print(f"- {url}")
print(f"- {article_b}")
else:
print(f"{RED}[!] Articles aren't connected. {RESET}")
print(f"---- metaedia took {time.time() - start_time} seconds to complete! ----") | 28 | 92 | 0.592481 |
011949330e2cac346820ac2c65c84a441f0c4e03 | 203 | py | Python | python/maya/site-packages/pymel-1.0.5/pymel/core/runtime.py | CountZer0/PipelineConstructionSet | 0aa73a8a63c72989b2d1c677efd78dad4388d335 | [
"BSD-3-Clause"
] | 21 | 2015-04-27T05:01:36.000Z | 2021-11-22T13:45:14.000Z | python/maya/site-packages/pymel-1.0.5/pymel/core/runtime.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | null | null | null | python/maya/site-packages/pymel-1.0.5/pymel/core/runtime.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | 7 | 2015-04-11T11:37:19.000Z | 2020-05-22T09:49:04.000Z | """
Runtime commands. These are kept in their own namespace to prevent conflict with other functions and classes.
"""
import pymel.internal.factories as _factories
_factories.createFunctions( __name__ ) | 33.833333 | 109 | 0.807882 |
18732ff78540ba19638ef0fc85a1d20ae9d6d7cb | 2,717 | py | Python | ntnui/tmp/accounts/api/exeline.py | kapteinstein/tdt4290 | 7bc2d2dbdbcc3fd35a05f1d2893d83255803f73b | [
"MIT"
] | null | null | null | ntnui/tmp/accounts/api/exeline.py | kapteinstein/tdt4290 | 7bc2d2dbdbcc3fd35a05f1d2893d83255803f73b | [
"MIT"
] | null | null | null | ntnui/tmp/accounts/api/exeline.py | kapteinstein/tdt4290 | 7bc2d2dbdbcc3fd35a05f1d2893d83255803f73b | [
"MIT"
] | null | null | null | import requests as req
class Exeline(object):
def __init__(self, username, password, requests=None):
if requests is None:
self.requests = req
else:
self.requests = requests
self.username = username
self.password = password
self.base_url = 'http://exceline.net/NTNUI'
self.gyms = {
'1': {'name': 'SiT Gløshaugen', 'id': '1'},
'2': {'name': 'SiT Dragvoll', 'id': '2'},
'3': {'name': 'SiT Portalen', 'id': '3'},
'4': {'name': 'SiT DMMH', 'id': '4'},
'5': {'name': 'SiT Moholt', 'id': '5'},
}
def request(self, url):
r = self.requests.get(self.base_url + url)
return r.json()
def get_url(self, func, gym_id=None, customer_number=None, days=0):
urls = {
'customer_in_gym': '/Member/{}/{}/{}/{}'.format(gym_id, customer_number,
self.username, self.password),
'members_for_gym_since_days': '/Members/{}/{}/{}/{}'.format(gym_id, days, self.username,
self.password),
'members_for_gym': '/Members/{}/{}/{}'.format(gym_id, self.username, self.password),
}
if func in urls:
return urls[func]
raise Exception('Invalid URL function.')
def get_members_for_gym(self, gym_id):
if gym_id not in self.gyms:
raise Exception('Invalid gym id.')
url = self.get_url(func='members_for_gym', gym_id=gym_id)
response = self.request(url)
return response['GetMemberDataResult']['Members']
def get_members_for_all_gyms(self):
results = {}
for gym_id in self.gyms:
results[gym_id] = self.get_members_for_gym(gym_id)
return results
def get_members_for_gym_since(self, gym_id, days):
if gym_id not in self.gyms:
raise Exception('Invalid gym id.')
url = self.get_url(func='members_for_gym_since_days',
gym_id=gym_id, days=days)
response = self.request(url)
return response['GetMemberDataChangesResult']['Members']
def get_members_for_all_gyms_since(self, days):
results = {}
for gym_id in self.gyms:
results[gym_id] = self.get_members_for_gym_since(gym_id, days)
return results
def get_customer_info(self, gym_id, customer_number):
url = self.get_url(func='customer_in_gym',
gym_id=gym_id, customer_number=customer_number)
response = self.request(url)
return response['GetMemberDataResult']['Members']
| 38.267606 | 100 | 0.557968 |
675b9a0c74fc418be0d04e04dca67c262a858c5e | 1,703 | py | Python | s3/prefix.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | [
"MIT"
] | null | null | null | s3/prefix.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | [
"MIT"
] | null | null | null | s3/prefix.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | [
"MIT"
] | null | null | null | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Prefix(object):
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Prefix':
self.name = value
else:
setattr(self, name, value)
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
| 39.604651 | 75 | 0.694069 |
3066a1f4bda208df4f5aba33f42d4ced277cc266 | 19,975 | py | Python | examples/ScanNet/utils.py | c6376315qqso/occuseg | ae7a2d9f0927dec515acf445f0bdd6d28510e915 | [
"BSD-3-Clause"
] | null | null | null | examples/ScanNet/utils.py | c6376315qqso/occuseg | ae7a2d9f0927dec515acf445f0bdd6d28510e915 | [
"BSD-3-Clause"
] | null | null | null | examples/ScanNet/utils.py | c6376315qqso/occuseg | ae7a2d9f0927dec515acf445f0bdd6d28510e915 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, numpy as np
import plyfile
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from tensorboardX import SummaryWriter
import pdb
def get_ori_label():
ori_label = [1,2,3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39]
return ori_label
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144)
]
def to_origianl_label(labels):
labelMapping = get_ori_label()
oriLabel = np.zeros([labels.shape[0]], dtype=np.int32)
for i in range(labels.shape[0]):
label = labels[i]
if label >= 0:
oriLabel[i] = labelMapping[label]
return oriLabel
def label2color(labels):
oriLabel = get_ori_label()
color_palette = create_color_palette()
color = np.zeros([labels.shape[0],3])
for i in range(labels.shape[0]):
label = labels[i]
if label >= 0:
oL = oriLabel[label]
color[i,0] = color_palette[oL][0]
color[i,1] = color_palette[oL][1]
color[i,2] = color_palette[oL][2]
else:
color[i,0] = 0
color[i,1] = 0
color[i,2] = 0
color = color / 255
return color
def visualize_label(batch,predictions,rep):
pred_ids = predictions.max(1)[1]
index_list = batch['idxs']
index_list = torch.cat(index_list,0)
[locs, feats, normals] = batch['x']
fns = batch['pth_file']
fn2s = []
fn3s = []
plyFiles = []
for fn in fns:
fn2 = fn[:-11]+'.ply'
fn3 = fn[:-3]+'labels.predict.ply'
print(fn2, fn3)
a = plyfile.PlyData().read(fn2)
fn2s.append(fn2)
fn3s.append(fn3)
plyFiles.append(a)
oriLabel = get_ori_label()
color_palette = create_color_palette()
new_pos = np.cumsum(index_list) - 1
point_start_cnt = 0
for point_cloud_id,plyFile in enumerate(plyFiles):
point_num = len(plyFile.elements[0]['red'])
for i in range(point_num):
pred_id = pred_ids[new_pos[point_start_cnt + i]]
if(pred_id >= 0):
w=oriLabel[pred_id]
r = color_palette[w][0]
g = color_palette[w][1]
b = color_palette[w][2]
else:
r = 0
g = 0
b = 0
plyFile.elements[0]['red'][i] = r
plyFile.elements[0]['green'][i] = g
plyFile.elements[0]['blue'][i] = b
point_start_cnt = point_start_cnt + point_num
print("write file: ", fn3s[point_cloud_id])
plyFile.write(fn3s[point_cloud_id])
SELECTED_LABEL_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
#Predictions will all be in the set {0,1,...,19}
VALID_CLASS_IDS = range(0, len(SELECTED_LABEL_IDS))
#label id to label name mapping: http://dovahkiin.stanford.edu/scannet-public/v1/tasks/scannet-labels.combined.tsv
LABEL_ID_TO_LABEL_NAME = {
1: 'wall',
2: 'chair',
3: 'floor',
4: 'table',
5: 'door',
6: 'couch',
7: 'cabinet',
8: 'shelf',
9: 'desk',
10: 'office chair',
11: 'bed',
12: 'trashcan',
13: 'pillow',
14: 'sink',
15: 'picture',
16: 'window',
17: 'toilet',
18: 'bookshelf',
19: 'monitor',
20: 'computer',
21: 'curtain',
22: 'book',
23: 'armchair',
24: 'coffee table',
25: 'drawer',
26: 'box',
27: 'refrigerator',
28: 'lamp',
29: 'kitchen cabinet',
30: 'dining chair',
31: 'towel',
32: 'clothes',
33: 'tv',
34: 'nightstand',
35: 'counter',
36: 'dresser',
37: 'countertop',
38: 'stool',
39: 'cushion',
}
METRIC_ID_TO_NAME = {0: 'iou',
1: 'tp',
2: 'denom',
3: 'fp',
4: 'fn',
}
#Classes relabelled
"""
CLASS_LABELS = []
for i, x in enumerate(SELECTED_LABEL_IDS):
# print(i, LABEL_ID_TO_LABEL_NAME[x])
CLASS_LABELS.append(LABEL_ID_TO_LABEL_NAME[x])
"""
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
INSTANCE_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
def confusion_matrix(pred_ids, gt_ids, class_num):
assert pred_ids.shape == gt_ids.shape, (pred_ids.shape, gt_ids.shape)
idxs= gt_ids>=0
return np.bincount(pred_ids[idxs]*class_num+gt_ids[idxs],minlength=class_num*class_num).reshape((class_num,class_num)).astype(np.ulonglong)
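# Added note (not part of the original module): a small worked example of the
# bincount trick above, assuming class_num = 3.
#   pred_ids = np.array([0, 1, 2, 1]); gt_ids = np.array([0, 2, 2, 1])
#   np.bincount(pred_ids * 3 + gt_ids, minlength=9).reshape((3, 3))
# yields a 3x3 matrix whose rows index predictions and columns index ground
# truth, with entry [i, j] counting points predicted as class i with ground truth j.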
def get_iou(label_id, confusion):
# true positives
tp = np.longlong(confusion[label_id, label_id])
# false positives
fp = np.longlong(confusion[label_id, :].sum()) - tp
# false negatives
fn = np.longlong(confusion[:, label_id].sum()) - tp
denom = (tp + fp + fn)
if tp + fn == 0:
return False
return (float(tp) / denom, tp, denom, fp, fn)
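# Added note (not part of the original module): get_iou returns
# IoU = tp / (tp + fp + fn). For example, tp=50, fp=10, fn=40 gives
# 50 / 100 = 0.5, returned as (0.5, 50, 100, 10, 40).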
def evaluate_scannet(pred_ids,gt_ids,train_writer,iter_id,class_num,topic = 'valid'):
print('evaluating', gt_ids.size, 'points...')
confusion=confusion_matrix(pred_ids,gt_ids,class_num)
class_ious = {}
for i in range(len(VALID_CLASS_IDS)):
label_name = CLASS_LABELS[i]
label_id = VALID_CLASS_IDS[i]
class_iou = get_iou(label_id, confusion)
if class_iou is not False:
class_ious[label_name] = get_iou(label_id, confusion)
sum_iou = 0
for label_name in class_ious:
sum_iou+=class_ious[label_name][0]
mean_iou = sum_iou/len(class_ious)
if topic == 'valid':
print('classes IoU tp denom fp fn')
print('----------------------------')
for i in range(len(VALID_CLASS_IDS)):
label_name = CLASS_LABELS[i]
if label_name in class_ious:
print('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d}/{4:>6d}/{5:<6d})'.format(
label_name,
class_ious[label_name][0],
class_ious[label_name][1],
class_ious[label_name][2],
class_ious[label_name][3],
class_ious[label_name][4]))
else:
print('{0:<14s}: {1}'.format(label_name, 'missing'))
for cate_id in range(5):
if(label_name in class_ious):
train_writer.add_scalar("{}/category/{}/{}".format(topic,label_name.replace(' ', '_'),
METRIC_ID_TO_NAME[cate_id]),
class_ious[label_name][cate_id], global_step=iter_id)
print('mean IOU', mean_iou)
train_writer.add_scalar(topic+"/overall_iou", mean_iou, iter_id)
return mean_iou
def evaluate_single_scan(pred_ids,gt_ids,train_writer,iter_id,class_num,topic = 'valid'):
confusion=confusion_matrix(pred_ids,gt_ids,class_num)
#print("[DEBUG_utils.py]", pred_ids, gt_ids, iter_id, class_num, confusion.shape)
class_ious = {}
for i in range(len(VALID_CLASS_IDS)):
label_name = CLASS_LABELS[i]
label_id = VALID_CLASS_IDS[i]
class_iou = get_iou(label_id, confusion)
if class_iou is not False:
class_ious[label_name] = get_iou(label_id, confusion)
sum_iou = 0
count = 0
for label_name in class_ious:
if class_ious[label_name][0] > 0.01:
sum_iou+=class_ious[label_name][0]
count = count + 1
mean_iou = sum_iou/count if count else 0
if topic == 'valid':
print('classes IoU tp denom fp fn')
print('----------------------------')
for i in range(len(VALID_CLASS_IDS)):
label_name = CLASS_LABELS[i]
if label_name in class_ious:
print('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d}/{4:>6d}/{5:<6d})'.format(
label_name,
class_ious[label_name][0],
class_ious[label_name][1],
class_ious[label_name][2],
class_ious[label_name][3],
class_ious[label_name][4]))
else:
print('{0:<14s}: {1}'.format(label_name, 'missing'))
print('mean IOU', mean_iou)
if train_writer is not None:
if not topic == 'valid':
for i in range(len(VALID_CLASS_IDS)):
label_name = CLASS_LABELS[i]
if label_name in class_ious:
for cate_id in range(5):
if train_writer is not None:
train_writer.add_scalar("{}/category/{}/{}".format(topic,label_name.replace(' ', '_'),
METRIC_ID_TO_NAME[cate_id]),
class_ious[label_name][cate_id], global_step=iter_id)
train_writer.add_scalar(topic+"/overall_iou", mean_iou, iter_id)
return mean_iou
class stanford_params:
def __init__(self):
self.class_freq = np.asarray([19.203, 16.566, 27.329,
2.428, 2.132, 2.123, 5.494, 3.25,
4.079, 0.488, 4.726, 1.264, 10.918, 100.0])
self.class_weights = -np.log(self.class_freq / 100.0)
self.num_classes = len(self.class_freq)
self.color_map = [ [128, 128, 128], # ceiling (red)
[124, 152, 0], # floor (green)
[255, 225, 25], # walls (yellow)
[0, 130, 200], # beam (blue)
[245, 130, 48], # column (orange)
[145, 30, 180], # window (purple)
[0, 130, 200], # door (cyan)
[0, 0, 128], # table (black)
[128, 0, 0], # chair (maroon)
[250, 190, 190], # sofa (pink)
[170, 110, 40], # bookcase (teal)
[0, 0, 0], # board (navy)
[170, 110, 40], # clutter (brown)
[128, 128, 128]] # stairs (grey)
self.class_name = ['ceiling','floor','walls','beam','column','window','door','table','chair','sofa','bookcase','board','clutter','stairs']
def evaluate_stanford3D(pred_ids,gt_ids,train_writer,iter_id, class_num = 20,topic = 'valid'):
print('evaluating', gt_ids.size, 'points...')
confusion=confusion_matrix(pred_ids,gt_ids,class_num)
class_ious = {}
dataset = stanford_params()
num_classes = dataset.num_classes
for i in range(num_classes):
label_name = dataset.class_name[i]
label_id = i
tp = np.longlong(confusion[label_id, label_id])
fp = np.longlong(confusion[label_id, :].sum()) - tp
not_ignored = [l for l in range(num_classes) if not l == label_id]
fn = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom > 0 and (tp + fn) > 0:
class_ious[label_name] = (float(tp) / denom, tp, denom, fp, fn)
sum_iou = 0
for label_name in class_ious:
sum_iou+=class_ious[label_name][0]
mean_iou = sum_iou/len(class_ious)
print('classes IoU tp denom fp fn')
print('----------------------------')
for i in range(num_classes):
label_name = dataset.class_name[i]
if label_name in class_ious:
print('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d}/{4:>6d}/{5:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2],class_ious[label_name][3],class_ious[label_name][4]))
else:
print('{0:<14s}: {1}'.format(label_name, 'missing'))
print('mean IOU', mean_iou)
train_writer.add_scalar(topic+"/overall_iou", mean_iou, iter_id)
return mean_iou
class scenenn_params:
def __init__(self):
# self.class_freq = np.asarray([19.203, 16.566, 27.329,
# 2.428, 2.132, 2.123, 5.494, 3.25,
# 4.079, 0.488, 4.726, 1.264, 10.918, 100.0])
# self.class_weights = -np.log(self.class_freq / 100.0)
self.num_classes = 10
# self.color_map = [ [128, 128, 128], # ceiling (red)
# [124, 152, 0], # floor (green)
# [255, 225, 25], # walls (yellow)
# [0, 130, 200], # beam (blue)
# [245, 130, 48], # column (orange)
# [145, 30, 180], # window (purple)
# [0, 130, 200], # door (cyan)
# [0, 0, 128], # table (black)
# [128, 0, 0], # chair (maroon)
# [250, 190, 190], # sofa (pink)
# [170, 110, 40], # bookcase (teal)
# [0, 0, 0], # board (navy)
# [170, 110, 40], # clutter (brown)
# [128, 128, 128]] # stairs (grey)
self.class_name = ['wall','floor','cabinet','bed','chair','sofa','table','desk','television','other-prop']
def evaluate_scenenn(pred_ids,gt_ids,train_writer,iter_id, class_num = 10,topic = 'valid'):
print('evaluating', gt_ids.size, 'points...')
confusion=confusion_matrix(pred_ids,gt_ids,class_num)
class_ious = {}
dataset = scenenn_params()
num_classes = dataset.num_classes
for i in range(num_classes):
label_name = dataset.class_name[i]
label_id = i
tp = np.longlong(confusion[label_id, label_id])
fp = np.longlong(confusion[label_id, :].sum()) - tp
not_ignored = [l for l in range(num_classes) if not l == label_id]
fn = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom > 0 and (tp + fn) > 0:
class_ious[label_name] = (float(tp) / denom, tp, denom, fp, fn)
sum_iou = 0
for label_name in class_ious:
sum_iou+=class_ious[label_name][0]
mean_iou = sum_iou/len(class_ious)
print('classes IoU tp denom fp fn')
print('----------------------------')
for i in range(num_classes):
label_name = dataset.class_name[i]
if label_name in class_ious:
print('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d}/{4:>6d}/{5:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2],class_ious[label_name][3],class_ious[label_name][4]))
else:
print('{0:<14s}: {1}'.format(label_name, 'missing'))
print('mean IOU', mean_iou)
train_writer.add_scalar(topic+"/overall_iou", mean_iou, iter_id)
return mean_iou
class FocalLoss(nn.Module):
def __init__(self):
super(FocalLoss, self).__init__()
self.weight = [0.3005,0.2700,0.0418,0.0275,0.0810,0.0254,0.0462,0.0418,0.0297,0.0277,0.0061,0.0065,0.0194,0.0150,0.0060,0.0036,0.0029,0.0025,0.0029,0.0434]
def forward(self, p, target):
idx = target >= 0
p1 = p[idx]
target = target[idx]
prob = torch.exp(p1)
pt = torch.gather(prob, 1, target.view(-1, 1))
pt = torch.div(pt.view(-1, 1), torch.sum(prob, 1).view(-1, 1))
modulator = (1 - pt) ** 2
loss = -(modulator * torch.log(pt)).mean()
return loss
def cost2color(prob,target):
target = torch.from_numpy(target)
idx = target >= 0
idx = torch.from_numpy(np.array(idx, dtype=np.uint8))
p1 = prob[idx]
target = target[idx]
x_class = torch.gather(p1, 1, target.view(-1, 1))
loss = -x_class + torch.logsumexp(p1, 1).view(-1, 1)
vmin = None
vmax = None
viridis = cm.get_cmap('viridis', 512)
norm = plt.Normalize(vmin, vmax)
colors = viridis(norm(loss.cpu().data.numpy()))
return colors[:,0,0:3]
class WeightedCrossEntropyLoss(nn.Module):
def __init__(self,weight):
super(WeightedCrossEntropyLoss, self).__init__()
self.weight = weight
def forward(self, p, target):
# weight is independent to traing, backward propogation
idx = target >= 0
p1 = p[idx]
target = target[idx]
with torch.no_grad():
predicted = p1.max(1)[1]
pw = torch.gather(self.weight.view(-1, 1), 0, predicted.view(-1, 1))
pt = torch.gather(self.weight.view(-1, 1), 0, target.view(-1, 1))
weight = 1 / (0.01 + torch.min(pw, pt))
weight = weight.detach()
x_class = torch.gather(p1, 1, target.view(-1, 1))
loss = -x_class + torch.logsumexp(p1, 1).view(-1, 1)
loss = weight * loss
return loss.mean()
| 39.632937 | 240 | 0.499124 |
3f7ab6df46fff56006c51fd5bf5469af9696b3b8 | 2,633 | py | Python | src/questions/models.py | Maelstroms38/Matchmaker | 082ff442901fcfc8ecb3b968f8197a792344b901 | [
"MIT"
] | null | null | null | src/questions/models.py | Maelstroms38/Matchmaker | 082ff442901fcfc8ecb3b968f8197a792344b901 | [
"MIT"
] | null | null | null | src/questions/models.py | Maelstroms38/Matchmaker | 082ff442901fcfc8ecb3b968f8197a792344b901 | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
# Create your models here.
class Question(models.Model):
text = models.TextField()
active = models.BooleanField(default=True)
draft = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
#answers = models.ManyToManyField('Answer')
def __unicode__(self):
return self.text[:10]
class Answer(models.Model):
questions = models.ForeignKey(Question)
text = models.CharField(max_length=120)
active = models.BooleanField(default=True)
draft = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
def __unicode__(self):
return self.text[:10]
LEVELS = (
('Expert', 'Expert'),
('Intermediate', 'Intermediate'),
('Beginner', 'Beginner'),
)
class UserAnswer(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
question = models.ForeignKey(Question)
my_answer = models.ForeignKey(Answer, related_name='user_answer')
my_answer_level = models.CharField(max_length=50, choices=LEVELS)
my_points = models.IntegerField(default=-1)
tutor_answer = models.ForeignKey(Answer, null=True, blank=True, related_name='match_answer')
tutor_answer_level = models.CharField(max_length=50, choices=LEVELS)
tutor_points = models.IntegerField(default=-1)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
def __unicode__(self):
return self.my_answer.text[:10]
def score_importance(importance_level):
if importance_level == "Expert":
points = 300
if importance_level == "Intermediate":
points = 200
if importance_level == "Beginner":
points = 100
elif importance_level == "No Experience":
points = 0
else:
points = 0
return points
@receiver(pre_save, sender=UserAnswer)
def update_user_answer_score(sender, instance, *args, **kwargs):
my_points = score_importance(instance.my_answer_level)
instance.my_points = my_points
tutor_points = score_importance(instance.tutor_answer_level)
instance.tutor_points = tutor_points
# def update_user_answer_score(sender, instance, created, *args, **kwargs):
# print sender
# print instance
# print created
# if instance.my_points == -1:
# my_points = score_importance(instance.my_answer_level)
# instance.my_points = my_points
# instance.save()
# if instance.tutor_points == -1:
# tutor_points = score_importance(instance.tutor_answer_level)
# instance.tutor_points = tutor_points
# instance.save()
# post_save.connect(update_user_answer_score, sender=UserAnswer) | 33.75641 | 93 | 0.773262 |
abaf4597a9aac7c5ad8c2e3d8bfb47d865cd6375 | 117 | py | Python | main.py | ongzhixian/bukit-bintang | be41abe81bee13a33fead806438a739a6e8aa4c7 | [
"MIT"
] | null | null | null | main.py | ongzhixian/bukit-bintang | be41abe81bee13a33fead806438a739a6e8aa4c7 | [
"MIT"
] | null | null | null | main.py | ongzhixian/bukit-bintang | be41abe81bee13a33fead806438a739a6e8aa4c7 | [
"MIT"
] | null | null | null | from modules.sample_game import SampleGame1
if __name__ == "__main__":
game = SampleGame1()
game.run_game()
| 19.5 | 43 | 0.717949 |
8ddeb2f64b0c1a6b2ec4934af17c88253e5a26f4 | 1,508 | py | Python | Python for Data Analysis and Visualisation/Lecture 9- Indexing arrays.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | Python for Data Analysis and Visualisation/Lecture 9- Indexing arrays.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | Python for Data Analysis and Visualisation/Lecture 9- Indexing arrays.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | """Tools:
arr.copy() --> explicitly makes a copy of an array. By default, all slices etc are VIEWS of the original array.
"""
import numpy as np
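# Added illustration (not in the original lecture) of the copy-vs-view note above:
# v = np.arange(3); s = v[:2]; s[0] = 99   # v becomes [99, 1, 2] because s is a view
# c = np.arange(3)[:2].copy(); c[0] = 99   # the source array is unchanged because c is a copy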
# this should create an array starting at zero and ending at 10, gap 1
arr = np.arange(0,11)
print(arr)
# Calling the value at 8
print(arr[8])
# slicing
print(arr[1:5])
# set a value over a range:
arr[0:5] = 100
print(arr)
#resetting
arr = np.arange(0,11)
#creating a slice of the array
slice_of_arr = arr[0:6]
print(slice_of_arr)
# set ALL the elements and set values
slice_of_arr[:] = 99
print(slice_of_arr)
# Keep in mind that slices of arrays etc are just VIEWS of arrays by default
print(arr)
# to make a copy
arr_copy =arr.copy()
#now indexing in 2d array...
arr_2d = np.array([[5,10,15],[20,25,30],[35,40,45]])
print(arr_2d)
# to pull up the first row... which the first entry in the 2d array
print(arr_2d[0])
# to pull up individal values
print(arr_2d[0][0]) # the first value is the row, the 2nd is the column
#2d array slicing to get the top right hand 2x2 array
# we want rows up to the 3rd row and excluding the first column
print(arr_2d[:2,1:])
# fancy indexing
arr2d = np.zeros((10,10))
print(arr2d)
arr_length = arr2d.shape[1]
print(arr_length)
# I predict that this should produce an array of 10 columns and 10 rows, rows progressing from 0 by 1 each time
for i in range(arr_length):
arr2d[i] = i
print(arr2d) # and I was correct!
# Now we can select individual rows in any order we choose
arr2d[[2,4,6,8]] | 21.855072 | 112 | 0.704244 |
de3175cf404da7dad2059b7f1bcc3e72afcec76c | 4,983 | py | Python | src/analyze.py | quocpp/RTH | 4cf39044e18ad23c9c02753fcbe6780e4430e2d8 | [
"MIT"
] | null | null | null | src/analyze.py | quocpp/RTH | 4cf39044e18ad23c9c02753fcbe6780e4430e2d8 | [
"MIT"
] | null | null | null | src/analyze.py | quocpp/RTH | 4cf39044e18ad23c9c02753fcbe6780e4430e2d8 | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
import sys
import math
import pickle
import copy
import numpy as np
import cv2
import matplotlib.pyplot as plt
from DataLoader import Batch
from Model import Model, DecoderType
from SamplePreprocessor import preprocess
# constants like filepaths
class Constants:
"filenames and paths to data"
fnCharList = '../model/charList.txt'
fnAnalyze = '../data/analyze.png'
fnPixelRelevance = '../data/pixelRelevance.npy'
fnTranslationInvariance = '../data/translationInvariance.npy'
fnTranslationInvarianceTexts = '../data/translationInvarianceTexts.pickle'
gtText = 'are'
distribution = 'histogram' # 'histogram' or 'uniform'
def odds(val):
return val / (1 - val)
def weightOfEvidence(origProb, margProb):
return math.log2(odds(origProb)) - math.log2(odds(margProb))
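# Added note (not part of the original script): weightOfEvidence compares the
# log-odds of the ground-truth text with and without a pixel marginalized out.
# For example, origProb=0.9 and margProb=0.5 give
# log2(0.9/0.1) - log2(0.5/0.5) = log2(9) - 0 ~= 3.17 bits of evidence.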
def analyzePixelRelevance():
"simplified implementation of paper: Zintgraf et al - Visualizing Deep Neural Network Decisions: Prediction Difference Analysis"
# setup model
model = Model(open(Constants.fnCharList).read(), DecoderType.BestPath, mustRestore=True)
# read image and specify ground-truth text
img = cv2.imread(Constants.fnAnalyze, cv2.IMREAD_GRAYSCALE)
(w, h) = img.shape
#assert Model.imgSize[1] == w
# compute probability of gt text in original image
batch = Batch([Constants.gtText], [preprocess(img, Model.imgSize)])
(_, probs) = model.inferBatch(batch, calcProbability=True, probabilityOfGT=True)
origProb = probs[0]
grayValues = [0, 63, 127, 191, 255]
if Constants.distribution == 'histogram':
bins = [0, 31, 95, 159, 223, 255]
(hist, _) = np.histogram(img, bins=bins)
pixelProb = hist / sum(hist)
elif Constants.distribution == 'uniform':
pixelProb = [1.0 / len(grayValues) for _ in grayValues]
else:
raise Exception('unknown value for Constants.distribution')
# iterate over all pixels in image
pixelRelevance = np.zeros(img.shape, np.float32)
for x in range(w):
for y in range(h):
# try a subset of possible grayvalues of pixel (x,y)
imgsMarginalized = []
for g in grayValues:
imgChanged = copy.deepcopy(img)
imgChanged[x, y] = g
imgsMarginalized.append(preprocess(imgChanged, Model.imgSize))
# put them all into one batch
batch = Batch([Constants.gtText]*len(imgsMarginalized), imgsMarginalized)
# compute probabilities
(_, probs) = model.inferBatch(batch, calcProbability=True, probabilityOfGT=True)
# marginalize over pixel value (assume uniform distribution)
margProb = sum([probs[i] * pixelProb[i] for i in range(len(grayValues))])
pixelRelevance[x, y] = weightOfEvidence(origProb, margProb)
print(x, y, pixelRelevance[x, y], origProb, margProb)
np.save(Constants.fnPixelRelevance, pixelRelevance)
def analyzeTranslationInvariance():
# setup model
model = Model(open(Constants.fnCharList).read(), DecoderType.BestPath, mustRestore=True)
# read image and specify ground-truth text
img = cv2.imread(Constants.fnAnalyze, cv2.IMREAD_GRAYSCALE)
img1 = preprocess(cv2.imread(Constants.fnAnalyze, cv2.IMREAD_GRAYSCALE), Model.imgSize)
(w, h) = img.shape
#assert Model.imgSize[1] == w
imgList = []
for dy in range(Model.imgSize[0]-h+1):
targetImg = np.ones((Model.imgSize[1], Model.imgSize[0])) * 255
targetImg[:,dy:h+dy] = img
imgList.append(preprocess(targetImg, Model.imgSize))
# put images and gt texts into batch
batch = Batch([Constants.gtText]*len(imgList), imgList)
# compute probabilities
(texts, probs) = model.inferBatch(batch, calcProbability=True, probabilityOfGT=True)
# save results to file
f = open(Constants.fnTranslationInvarianceTexts, 'wb')
pickle.dump(texts, f)
f.close()
np.save(Constants.fnTranslationInvariance, probs)
def showResults():
# 1. pixel relevance
pixelRelevance = np.load(Constants.fnPixelRelevance)
plt.figure('Pixel relevance')
plt.imshow(pixelRelevance, cmap=plt.cm.jet, vmin=-0.25, vmax=0.25)
plt.colorbar()
img = cv2.imread(Constants.fnAnalyze, cv2.IMREAD_GRAYSCALE)
plt.imshow(img, cmap=plt.cm.gray, alpha=.4)
# 2. translation invariance
probs = np.load(Constants.fnTranslationInvariance)
f = open(Constants.fnTranslationInvarianceTexts, 'rb')
texts = pickle.load(f)
texts = ['%d:'%i + texts[i] for i in range(len(texts))]
f.close()
plt.figure('Translation invariance')
plt.plot(probs, 'o-')
plt.xticks(np.arange(len(texts)), texts, rotation='vertical')
plt.xlabel('horizontal translation and best path')
plt.ylabel('text probability of "%s"'%Constants.gtText)
# show both plots
plt.show()
if __name__ == '__main__':
if len(sys.argv) > 3:
Constants.fnAnalyze = sys.argv[2]
Constants.gtText = sys.argv[3]
if len(sys.argv)>1:
if sys.argv[1]=='--relevance':
print('Analyze pixel relevance')
analyzePixelRelevance()
elif sys.argv[1]=='--invariance':
print('Analyze translation invariance')
analyzeTranslationInvariance()
else:
showResults()
else:
print('Show results')
showResults()
| 30.018072 | 129 | 0.730283 |
919d4cb2b1fbf8521bf8df30d28666a7732eb3a9 | 16,843 | py | Python | novaclient/tests/unit/test_utils.py | dtroyer/python-novaclient | 4c483322fe5454c8ece66cc9c86cbc0702e14368 | [
"Apache-1.1"
] | null | null | null | novaclient/tests/unit/test_utils.py | dtroyer/python-novaclient | 4c483322fe5454c8ece66cc9c86cbc0702e14368 | [
"Apache-1.1"
] | null | null | null | novaclient/tests/unit/test_utils.py | dtroyer/python-novaclient | 4c483322fe5454c8ece66cc9c86cbc0702e14368 | [
"Apache-1.1"
] | 1 | 2019-01-11T16:15:52.000Z | 2019-01-11T16:15:52.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_utils import encodeutils
import six
from six.moves.urllib import parse
from novaclient import base
from novaclient import exceptions
from novaclient.tests.unit import fakes
from novaclient.tests.unit import utils as test_utils
from novaclient import utils
UUID = '8e8ec658-c7b0-4243-bdf8-6f7f2952c0d0'
class FakeResource(object):
NAME_ATTR = 'name'
request_ids = fakes.FAKE_REQUEST_ID_LIST
def __init__(self, _id, properties):
self.id = _id
try:
self.name = properties['name']
except KeyError:
pass
def append_request_ids(self, resp):
pass
class FakeManager(base.ManagerWithFind):
resource_class = FakeResource
resources = [
FakeResource('1234', {'name': 'entity_one'}),
FakeResource('12345', {'name': 'UPPER'}),
FakeResource('123456', {'name': 'lower'}),
FakeResource('1234567', {'name': 'Mixed'}),
FakeResource('12345678', {'name': 'mixed'}),
FakeResource(UUID, {'name': 'entity_two'}),
FakeResource('5678', {'name': '9876'}),
FakeResource('01234', {'name': 'entity_three'})
]
is_alphanum_id_allowed = None
def __init__(self, alphanum_id_allowed=False):
self.is_alphanum_id_allowed = alphanum_id_allowed
def get(self, resource_id):
for resource in self.resources:
if resource.id == str(resource_id):
return resource
raise exceptions.NotFound(resource_id)
def list(self):
return base.ListWithMeta(self.resources, fakes.FAKE_REQUEST_ID_LIST)
class FakeDisplayResource(object):
NAME_ATTR = 'display_name'
def __init__(self, _id, properties):
self.id = _id
try:
self.display_name = properties['display_name']
except KeyError:
pass
def append_request_ids(self, resp):
pass
class FakeDisplayManager(FakeManager):
resource_class = FakeDisplayResource
resources = [
FakeDisplayResource('4242', {'display_name': 'entity_three'}),
]
class FindResourceTestCase(test_utils.TestCase):
def setUp(self):
super(FindResourceTestCase, self).setUp()
self.manager = FakeManager(None)
def test_find_none(self):
"""Test a few non-valid inputs."""
self.assertRaises(exceptions.CommandError,
utils.find_resource,
self.manager,
'asdf')
self.assertRaises(exceptions.CommandError,
utils.find_resource,
self.manager,
None)
self.assertRaises(exceptions.CommandError,
utils.find_resource,
self.manager,
{})
def test_find_by_integer_id(self):
output = utils.find_resource(self.manager, 1234)
self.assertEqual(output, self.manager.get('1234'))
def test_find_by_str_id(self):
output = utils.find_resource(self.manager, '1234')
self.assertEqual(output, self.manager.get('1234'))
def test_find_by_uuid(self):
output = utils.find_resource(self.manager, UUID)
self.assertEqual(output, self.manager.get(UUID))
def test_find_by_str_name(self):
output = utils.find_resource(self.manager, 'entity_one')
self.assertEqual(output, self.manager.get('1234'))
def test_find_by_str_upper_name(self):
output = utils.find_resource(self.manager, 'UPPER')
self.assertEqual(output, self.manager.get('12345'))
def test_find_by_str_lower_name(self):
output = utils.find_resource(self.manager, 'lower')
self.assertEqual(output, self.manager.get('123456'))
def test_find_by_str_mix_name(self):
output = utils.find_resource(self.manager, 'Mixed')
self.assertEqual(output, self.manager.get('1234567'))
def test_find_by_str_lower_name_mixed(self):
output = utils.find_resource(self.manager, 'mixed')
self.assertEqual(output, self.manager.get('12345678'))
def test_find_by_str_display_name(self):
display_manager = FakeDisplayManager(None)
output = utils.find_resource(display_manager, 'entity_three')
self.assertEqual(output, display_manager.get('4242'))
def test_find_in_alphanum_allowed_manager_by_str_id_(self):
alphanum_manager = FakeManager(True)
output = utils.find_resource(alphanum_manager, '01234')
self.assertEqual(output, alphanum_manager.get('01234'))
def test_find_without_wrapping_exception(self):
alphanum_manager = FakeManager(True)
self.assertRaises(exceptions.NotFound, utils.find_resource,
alphanum_manager, 'not_exist', wrap_exception=False)
res = alphanum_manager.resources[0]
alphanum_manager.resources.append(res)
self.assertRaises(exceptions.NoUniqueMatch, utils.find_resource,
alphanum_manager, res.name, wrap_exception=False)
class _FakeResult(object):
def __init__(self, name, value):
self.name = name
self.value = value
class PrintResultTestCase(test_utils.TestCase):
@mock.patch('sys.stdout', six.StringIO())
def test_print_dict(self):
dict = {'key': 'value'}
utils.print_dict(dict)
self.assertEqual('+----------+-------+\n'
'| Property | Value |\n'
'+----------+-------+\n'
'| key | value |\n'
'+----------+-------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_dict_wrap(self):
dict = {'key1': 'not wrapped',
'key2': 'this will be wrapped'}
utils.print_dict(dict, wrap=16)
self.assertEqual('+----------+--------------+\n'
'| Property | Value |\n'
'+----------+--------------+\n'
'| key1 | not wrapped |\n'
'| key2 | this will be |\n'
'| | wrapped |\n'
'+----------+--------------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_list_sort_by_str(self):
objs = [_FakeResult("k1", 1),
_FakeResult("k3", 2),
_FakeResult("k2", 3)]
utils.print_list(objs, ["Name", "Value"], sortby_index=0)
self.assertEqual('+------+-------+\n'
'| Name | Value |\n'
'+------+-------+\n'
'| k1 | 1 |\n'
'| k2 | 3 |\n'
'| k3 | 2 |\n'
'+------+-------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_list_sort_by_integer(self):
objs = [_FakeResult("k1", 1),
_FakeResult("k3", 2),
_FakeResult("k2", 3)]
utils.print_list(objs, ["Name", "Value"], sortby_index=1)
self.assertEqual('+------+-------+\n'
'| Name | Value |\n'
'+------+-------+\n'
'| k1 | 1 |\n'
'| k3 | 2 |\n'
'| k2 | 3 |\n'
'+------+-------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_unicode_list(self):
objs = [_FakeResult("k", u'\u2026')]
utils.print_list(objs, ["Name", "Value"])
if six.PY3:
s = u'\u2026'
else:
s = encodeutils.safe_encode(u'\u2026')
self.assertEqual('+------+-------+\n'
'| Name | Value |\n'
'+------+-------+\n'
'| k | %s |\n'
'+------+-------+\n' % s,
sys.stdout.getvalue())
# without sorting
@mock.patch('sys.stdout', six.StringIO())
def test_print_list_sort_by_none(self):
objs = [_FakeResult("k1", 1),
_FakeResult("k3", 3),
_FakeResult("k2", 2)]
utils.print_list(objs, ["Name", "Value"], sortby_index=None)
self.assertEqual('+------+-------+\n'
'| Name | Value |\n'
'+------+-------+\n'
'| k1 | 1 |\n'
'| k3 | 3 |\n'
'| k2 | 2 |\n'
'+------+-------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_dict_dictionary(self):
dict = {'k': {'foo': 'bar'}}
utils.print_dict(dict)
self.assertEqual('+----------+----------------+\n'
'| Property | Value |\n'
'+----------+----------------+\n'
'| k | {"foo": "bar"} |\n'
'+----------+----------------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_dict_list_dictionary(self):
dict = {'k': [{'foo': 'bar'}]}
utils.print_dict(dict)
self.assertEqual('+----------+------------------+\n'
'| Property | Value |\n'
'+----------+------------------+\n'
'| k | [{"foo": "bar"}] |\n'
'+----------+------------------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_dict_list(self):
dict = {'k': ['foo', 'bar']}
utils.print_dict(dict)
self.assertEqual('+----------+----------------+\n'
'| Property | Value |\n'
'+----------+----------------+\n'
'| k | ["foo", "bar"] |\n'
'+----------+----------------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_large_dict_list(self):
dict = {'k': ['foo1', 'bar1', 'foo2', 'bar2',
'foo3', 'bar3', 'foo4', 'bar4']}
utils.print_dict(dict, wrap=40)
self.assertEqual(
'+----------+------------------------------------------+\n'
'| Property | Value |\n'
'+----------+------------------------------------------+\n'
'| k | ["foo1", "bar1", "foo2", "bar2", "foo3", |\n'
'| | "bar3", "foo4", "bar4"] |\n'
'+----------+------------------------------------------+\n',
sys.stdout.getvalue())
@mock.patch('sys.stdout', six.StringIO())
def test_print_unicode_dict(self):
dict = {'k': u'\u2026'}
utils.print_dict(dict)
if six.PY3:
s = u'\u2026'
else:
s = encodeutils.safe_encode(u'\u2026')
self.assertEqual('+----------+-------+\n'
'| Property | Value |\n'
'+----------+-------+\n'
'| k | %s |\n'
'+----------+-------+\n' % s,
sys.stdout.getvalue())
class FlattenTestCase(test_utils.TestCase):
def test_flattening(self):
squashed = utils.flatten_dict(
{'a1': {'b1': 1234,
'b2': 'string',
'b3': set((1, 2, 3)),
'b4': {'c1': ['l', 'l', ['l']],
'c2': 'string'}},
'a2': ['l'],
'a3': ('t',),
'a4': {}})
self.assertEqual({'a1_b1': 1234,
'a1_b2': 'string',
'a1_b3': set([1, 2, 3]),
'a1_b4_c1': ['l', 'l', ['l']],
'a1_b4_c2': 'string',
'a2': ['l'],
'a3': ('t',),
'a4': {}},
squashed)
def test_pretty_choice_dict(self):
d = {}
r = utils.pretty_choice_dict(d)
self.assertEqual("", r)
d = {"k1": "v1",
"k2": "v2",
"k3": "v3"}
r = utils.pretty_choice_dict(d)
self.assertEqual("'k1=v1', 'k2=v2', 'k3=v3'", r)
class ValidationsTestCase(test_utils.TestCase):
def test_validate_flavor_metadata_keys_with_valid_keys(self):
valid_keys = ['key1', 'month.price', 'I-Am:AK-ey.01-', 'spaces and _']
utils.validate_flavor_metadata_keys(valid_keys)
def test_validate_flavor_metadata_keys_with_invalid_keys(self):
invalid_keys = ['/1', '?1', '%1', '<', '>', '\1']
for key in invalid_keys:
try:
utils.validate_flavor_metadata_keys([key])
self.fail("Invalid key passed validation: %s" % key)
except exceptions.CommandError as ce:
self.assertIn(key, str(ce))
class ResourceManagerExtraKwargsHookTestCase(test_utils.TestCase):
def test_get_resource_manager_extra_kwargs_hook_test(self):
do_foo = mock.MagicMock()
def hook1(args):
return {'kwarg1': 'v_hook1'}
def hook2(args):
return {'kwarg1': 'v_hook2'}
do_foo.resource_manager_kwargs_hooks = [hook1, hook2]
args = {}
exc = self.assertRaises(exceptions.NoUniqueMatch,
utils.get_resource_manager_extra_kwargs,
do_foo,
args)
except_error = ("Hook 'hook2' is attempting to redefine "
"attributes")
self.assertIn(except_error, six.text_type(exc))
class DoActionOnManyTestCase(test_utils.TestCase):
def _test_do_action_on_many(self, side_effect, fail):
action = mock.Mock(side_effect=side_effect)
if fail:
self.assertRaises(exceptions.CommandError,
utils.do_action_on_many,
action, [1, 2], 'success with %s', 'error')
else:
utils.do_action_on_many(action, [1, 2], 'success with %s', 'error')
action.assert_has_calls([mock.call(1), mock.call(2)])
def test_do_action_on_many_success(self):
self._test_do_action_on_many([None, None], fail=False)
def test_do_action_on_many_first_fails(self):
self._test_do_action_on_many([Exception(), None], fail=True)
def test_do_action_on_many_last_fails(self):
self._test_do_action_on_many([None, Exception()], fail=True)
class RecordTimeTestCase(test_utils.TestCase):
def test_record_time(self):
times = []
with utils.record_time(times, True, 'a', 'b'):
pass
self.assertEqual(1, len(times))
self.assertEqual(3, len(times[0]))
self.assertEqual('a b', times[0][0])
self.assertIsInstance(times[0][1], float)
self.assertIsInstance(times[0][2], float)
times = []
with utils.record_time(times, False, 'x'):
pass
self.assertEqual(0, len(times))
class PrepareQueryStringTestCase(test_utils.TestCase):
def test_convert_dict_to_string(self):
ustr = b'?\xd0\xbf=1&\xd1\x80=2'
if six.PY3:
# in py3 real unicode symbols will be urlencoded
ustr = ustr.decode('utf8')
cases = (
({}, ''),
({'2': 2, '10': 1}, '?10=1&2=2'),
({'abc': 1, 'abc1': 2}, '?abc=1&abc1=2'),
({b'\xd0\xbf': 1, b'\xd1\x80': 2}, ustr),
({(1, 2): '1', (3, 4): '2'}, '?(1, 2)=1&(3, 4)=2')
)
for case in cases:
self.assertEqual(
case[1],
parse.unquote_plus(utils.prepare_query_string(case[0])))
| 36.45671 | 79 | 0.490055 |
ebac6ce39ff6d93d756089be8a7f68626df9409d | 3,573 | py | Python | pytext/utils/precision_utils.py | huntermonk/pytext | 34e3fd20dc0976d243d7260174d7847f4741b079 | [
"BSD-3-Clause"
] | null | null | null | pytext/utils/precision_utils.py | huntermonk/pytext | 34e3fd20dc0976d243d7260174d7847f4741b079 | [
"BSD-3-Clause"
] | null | null | null | pytext/utils/precision_utils.py | huntermonk/pytext | 34e3fd20dc0976d243d7260174d7847f4741b079 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
_APEX_DISABLED = False
try:
from apex import amp
except ImportError:
print("Install apex from https://github.com/NVIDIA/apex/.")
_APEX_DISABLED = True
except AttributeError as e:
print(f"Fail to import apex: {e}")
_APEX_DISABLED = True
from pytext.utils import cuda_utils
"""
Tips:
1. Must run fp16 on latest generation (Volta V100) GPU, CUDA 9.1 or newer
2. Additionally:
- Batch size should be a multiple of 8
- Tokens size should be a multiple of 8
- Embedding layers should be padded to be a multiple of 8
- Ideally, everything should be a multiple of 8 (e.g padding, etc)
3. Larger batch_size could increase GPU utilization and improve performance.
4. Amp might not work well for models that require too many back-and-forth
parameter casting between fp16 and fp32.
"""
"""
Apex amp: https://github.com/NVIDIA/apex/tree/master/apex/amp
FP32 Master Weights <--(step)-- FP32 Gradients <--(unscale)-- Scaled FP16 Gradients
| |
(copy) | | (backprop)
| |
FP16 Weights --(forward)--> FP32 Loss --(loss scaling)--> Scaled FP32 Loss
For Apex.amp, it handles mixed precision training in the following ways:
1. [Master weights]: master weights (e.g. fp32) are maintained by the PyTorch model
2. [Forward & Backward]: amp wraps PyTorch functions; it casts inputs &
weights into fp16 or fp32, and _amp_handle caches the casted arguments.
3. [Loss scaling]: _amp_handle handles loss scaling and unscaling
Using amp requires adding three lines of code.
1. _amp_handle = amp.init(enabled=fp16_enabled)
2. optimizer = _amp_handle.wrap_optimizer(optimizer)
3. with optimizer.scale_loss(loss) as scaled_loss: scaled_loss.backward()
"""
_FP16_ENABLED = False
_amp_handle = None
def set_fp16(fp16_enabled: bool):
global _FP16_ENABLED
global _amp_handle
if _APEX_DISABLED:
return
_FP16_ENABLED = fp16_enabled
if _FP16_ENABLED:
if not cuda_utils.CUDA_ENABLED:
raise RuntimeError("Cuda is not available, should not running fp16...")
_amp_handle = amp.init(enabled=fp16_enabled)
def wrap_optimizer(optimizer):
if _FP16_ENABLED:
return _amp_handle.wrap_optimizer(optimizer)
else:
return optimizer
def backward(optimizer, loss):
if _FP16_ENABLED:
# 1. Use automatic loss scaling to best use fp16 range (skip step if overflow)
# 2. Clear handle's cache of casted parameters before the next optimizer step
with optimizer.scale_loss(loss) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
def deactivate():
global _FP16_ENABLED
if _FP16_ENABLED:
# restoring uncasted versions of functions
_amp_handle._deactivate()
_FP16_ENABLED = False
else:
pass
def maybe_float(tensor):
if _FP16_ENABLED and tensor.type().split(".")[-1] == "HalfTensor":
return tensor.float()
else:
return tensor
def maybe_half(tensor):
if _FP16_ENABLED and tensor.type().split(".")[-1] == "FloatTensor":
return tensor.half()
else:
return tensor
def pad_length(n):
if _FP16_ENABLED:
# To take advantage of tensor core, length should be multiple of 8
remainder = n % 8
if remainder > 0:
n = n + 8 - remainder
return n
| 29.528926 | 86 | 0.65911 |
d95ebbe719394829ebd21eb1bf15a55a1eb732a1 | 1,399 | py | Python | tests/testPublicKey.py | pibara/ecdsa-python | 908f3c7612bec89759e1b4407e40c2a9e2d28cf1 | [
"MIT"
] | null | null | null | tests/testPublicKey.py | pibara/ecdsa-python | 908f3c7612bec89759e1b4407e40c2a9e2d28cf1 | [
"MIT"
] | null | null | null | tests/testPublicKey.py | pibara/ecdsa-python | 908f3c7612bec89759e1b4407e40c2a9e2d28cf1 | [
"MIT"
] | null | null | null | # coding=utf-8
from unittest.case import TestCase
from ellipticcurve.privateKey import PrivateKey
from ellipticcurve.publicKey import PublicKey
from ellipticcurve.utils.compatibility import *
class PublicKeyTest(TestCase):
def testPemConversion(self):
privateKey = PrivateKey()
publicKey1 = privateKey.publicKey()
pem = publicKey1.toPem()
publicKey2 = PublicKey.fromPem(pem)
self.assertEqual(publicKey1.point.x, publicKey2.point.x)
self.assertEqual(publicKey1.point.y, publicKey2.point.y)
self.assertEqual(publicKey1.curve, publicKey2.curve)
def testDerConversion(self):
privateKey = PrivateKey()
publicKey1 = privateKey.publicKey()
der = publicKey1.toDer()
publicKey2 = PublicKey.fromDer(toBytes(der))
self.assertEqual(publicKey1.point.x, publicKey2.point.x)
self.assertEqual(publicKey1.point.y, publicKey2.point.y)
self.assertEqual(publicKey1.curve, publicKey2.curve)
def testStringConversion(self):
privateKey = PrivateKey()
publicKey1 = privateKey.publicKey()
string = publicKey1.toString()
publicKey2 = PublicKey.fromString(toBytes(string))
self.assertEqual(publicKey1.point.x, publicKey2.point.x)
self.assertEqual(publicKey1.point.y, publicKey2.point.y)
self.assertEqual(publicKey1.curve, publicKey2.curve)
| 37.810811 | 64 | 0.712652 |
474bc9b014eb47d2a2795a51cf912b7d85bffbd9 | 27,717 | py | Python | tensorflow_quantum/core/ops/batch_util.py | NunoEdgarGFlowHub/quantum | 260ad7e4631ed10109eab9fc34f242cfc1aac3af | [
"Apache-2.0"
] | 1 | 2020-03-18T19:48:36.000Z | 2020-03-18T19:48:36.000Z | tensorflow_quantum/core/ops/batch_util.py | NunoEdgarGFlowHub/quantum | 260ad7e4631ed10109eab9fc34f242cfc1aac3af | [
"Apache-2.0"
] | 1 | 2022-02-10T02:26:00.000Z | 2022-02-10T02:26:00.000Z | tensorflow_quantum/core/ops/batch_util.py | NunoEdgarGFlowHub/quantum | 260ad7e4631ed10109eab9fc34f242cfc1aac3af | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module to for running Cirq Simulators in parallel."""
import asyncio
import collections
import itertools
import os
import numpy as np
import cirq
import pathos
from tensorflow_quantum.core.serialize import serializer
# TODO (mbbrough): Remove this workaround class once cirq.PauliSumCollector can
# be used end to end with engine. The current issue is that
# cirq.PauliSumCollector does not produce serializable gates for basis
# conversion.
class TFQPauliSumCollector(cirq.work.collector.Collector):
"""Copy of cirq.PauliSumCollector with some fixes to work with engine."""
def __init__(self,
circuit,
observable,
*,
samples_per_term,
max_samples_per_job=1000000):
observable = cirq.PauliSum.wrap(observable)
self._circuit = circuit
self._samples_per_job = max_samples_per_job
self._pauli_coef_terms = [
(p / p.coefficient, p.coefficient) for p in observable if p
]
self._identity_offset = 0
for p in observable:
if not p:
self._identity_offset += p.coefficient
self._zeros = collections.defaultdict(lambda: 0)
self._ones = collections.defaultdict(lambda: 0)
self._samples_per_term = samples_per_term
self._total_samples_requested = 0
def next_job(self):
"""Get the next job."""
i = self._total_samples_requested // self._samples_per_term
if i >= len(self._pauli_coef_terms):
return None
pauli, _ = self._pauli_coef_terms[i]
remaining = self._samples_per_term * (i +
1) - self._total_samples_requested
amount_to_request = min(remaining, self._samples_per_job)
self._total_samples_requested += amount_to_request
return cirq.work.collector.CircuitSampleJob(
circuit=_fixed_circuit_plus_pauli_string_measurements(
self._circuit, pauli),
repetitions=amount_to_request,
tag=pauli)
def on_job_result(self, job, result):
"""Post process the `job` and `result` you have."""
job_id = job.tag
parities = result.histogram(key='out',
fold_func=lambda bits: np.sum(bits) % 2)
self._zeros[job_id] += parities[0]
self._ones[job_id] += parities[1]
def estimated_energy(self):
"""Sums up the sampled expectations, weighted by their coefficients."""
energy = 0j
for pauli_string, coef in self._pauli_coef_terms:
a = self._zeros[pauli_string]
b = self._ones[pauli_string]
if a + b:
energy += coef * (a - b) / (a + b)
energy = complex(energy)
if energy.imag == 0:
energy = energy.real
energy += self._identity_offset
return energy
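# Added note (not part of the original class): estimated_energy converts sampled
# parities into an expectation per term. E.g. if a term with coefficient 0.5
# observed 75 even ("zeros") and 25 odd ("ones") parities, it contributes
# 0.5 * (75 - 25) / (75 + 25) = 0.25, plus any identity offset.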
def _fixed_circuit_plus_pauli_string_measurements(circuit, pauli_string):
"""A circuit measuring the given observable at the end of the given circuit.
"""
assert pauli_string
circuit = circuit.copy()
# Uses cirq.SingleQubitCliffordGates which aren't serializable by engine in
# cirq 0.6. This is a workaround until fixed.
# circuit.append(cirq.Moment(pauli_string.to_z_basis_ops()))
circuit.append(cirq.Moment(cirq.decompose(pauli_string.to_z_basis_ops())))
circuit.append(
cirq.Moment([cirq.measure(*sorted(pauli_string.keys()), key='out')]))
return circuit
def _make_complex_view(shape, init_val):
"""Build a RawArray that will map to the real and imaginary parts of a
complex number."""
shape = list(shape)
shape[-1] *= 2
data = np.ones(shape, dtype=np.float32) * init_val
flattened_size = 1
for dim_size in shape:
flattened_size *= dim_size
shared_mem_array = pathos.helpers.mp.RawArray('f', flattened_size)
np_view = np.frombuffer(shared_mem_array, dtype=np.float32).reshape(shape)
np.copyto(np_view, data)
return shared_mem_array
def _convert_complex_view_to_np(view, shape):
"""Get a numpy view ontop of the rawarray view. Small overhead."""
shape = list(shape)
shape[-1] *= 2
return np.frombuffer(view, dtype=np.float32).reshape(shape)
def _update_complex_np(np_view, i, to_add):
"""Update the shared memory undernath the numpy view.
to_add is passed by reference since we don't do much with it."""
np_view[i, ...] = np.pad(to_add,
(0, (np_view.shape[-1] // 2 - to_add.shape[-1])),
'constant',
constant_values=-2).view(np.float32)
def _convert_complex_view_to_result(view, shape):
"""Convert a rawarray view to a numpy array and reindex so that
the underlying pair of double arrays are squished together to make a
complex array of half the underlying size."""
shape = list(shape)
shape[-1] *= 2
np_view = np.frombuffer(view, dtype=np.float32).reshape(shape)
# The below view will cause a re-interpretation of underlying
# memory so use sparingly.
return np_view.view(np.complex64)
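# Added sketch (not part of the original module) of the intended round trip for
# the complex views above, assuming a batch of 2 states with 4 amplitudes each:
#   view = _make_complex_view((2, 4), -2)
#   np_view = _convert_complex_view_to_np(view, (2, 4))
#   _update_complex_np(np_view, 0, np.zeros(4, dtype=np.complex64))
#   states = _convert_complex_view_to_result(view, (2, 4))  # complex64, shape (2, 4)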
def _make_simple_view(shape, init_val, dtype, c_code):
"""Make a shared memory view for floating type."""
data = np.ones(shape, dtype=dtype) * init_val
flattened_size = 1
for dim_size in shape:
flattened_size *= dim_size
shared_mem_array = pathos.helpers.mp.RawArray(c_code, flattened_size)
np_view = np.frombuffer(shared_mem_array, dtype=dtype).reshape(shape)
np.copyto(np_view, data)
return shared_mem_array
def _convert_simple_view_to_np(view, dtype, shape):
"""Create a numpy view to a float array, low overhead."""
return np.frombuffer(view, dtype=dtype).reshape(shape)
def _batch_update_simple_np(np_view, i, to_add):
"""Update the shared memory underneath the numpy view.
to_add is again passed by reference."""
np_view[i, ...] = to_add
def _pointwise_update_simple_np(np_view, i, j, to_add):
"""Do a batch and sub-batch index update to numpy view."""
np_view[i, j, ...] = to_add
def _convert_simple_view_to_result(view, dtype, shape):
"""Convert a RawArray view to final numpy array."""
return np.frombuffer(view, dtype=dtype).reshape(shape)
def _prep_pool_input_args(indices, *args, slice_args=True):
    """Break down a set of indices, and optional args, into a generator
    of at most cpu_count chunks."""
block_size = int(np.ceil(len(indices) / os.cpu_count()))
for i in range(0, len(indices), block_size):
if slice_args:
yield tuple([indices[i:i + block_size]] +
[x[i:i + block_size] for x in args])
else:
yield tuple([indices[i:i + block_size]] + [x for x in args])
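# --- Editor's illustrative sketch; not part of the original module. ---
# _prep_pool_input_args splits parallel lists into per-worker chunks, e.g.
# eight indices plus a matching list of programs become roughly cpu_count()
# tuples of (index_chunk, program_chunk); exact sizes depend on the machine.
def _example_prep_pool_input_args():
    indices = list(range(8))
    programs = ['program-{}'.format(i) for i in indices]
    return list(_prep_pool_input_args(indices, programs))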
# Each worker process is separate from all the other processes,
# so their module-level INFO_DICTs will not step on each other.
INFO_DICT = {}
def _setup_dict(array_view, view_shape, simulator, post_process):
INFO_DICT['arr'] = array_view
INFO_DICT['shape'] = view_shape
INFO_DICT['sim'] = simulator
INFO_DICT['post_process'] = post_process
def _state_worker_func(indices, programs, params):
"""Compute the wavefunction for each program in indices."""
x_np = _convert_complex_view_to_np(INFO_DICT['arr'], INFO_DICT['shape'])
simulator = INFO_DICT['sim']
for i, index in enumerate(indices):
result = simulator.simulate(programs[i], params[i])
final_array = INFO_DICT['post_process'](result).astype(np.complex64)
_update_complex_np(x_np, index, final_array)
def _analytical_expectation_worker_func(indices, programs, params, ops):
    """Compute the expectation of ops[batch_index] w.r.t.
    programs[batch_index], where batch_index is calculated from indices."""
x_np = _convert_simple_view_to_np(INFO_DICT['arr'], np.float32,
INFO_DICT['shape'])
simulator = INFO_DICT['sim']
# TODO: remove this when picklable.
for i in range(len(ops)):
for j in range(len(ops[i])):
ops[i][j] = serializer.deserialize_paulisum(ops[i][j])
old_batch_index = -2
state = -1
for i, index_tuple in enumerate(indices):
batch_index = index_tuple[0]
op_index = index_tuple[1]
# (#679) Just ignore empty programs.
if len(programs[batch_index].all_qubits()) == 0:
continue
if old_batch_index != batch_index:
# must compute a new wavefunction.
            qubit_order = dict(
zip(sorted(programs[batch_index].all_qubits()),
list(range(len(programs[batch_index].all_qubits())))))
state = simulator.simulate(programs[batch_index],
params[batch_index])
result = INFO_DICT['post_process'](ops[batch_index][op_index], state,
                                           qubit_order)
_pointwise_update_simple_np(x_np, batch_index, op_index, result)
old_batch_index = batch_index
def _sample_expectation_worker_func(indices, programs, params, ops, n_samples):
x_np = _convert_simple_view_to_np(INFO_DICT['arr'], np.float32,
INFO_DICT['shape'])
simulator = INFO_DICT['sim']
# TODO: remove this when picklable.
for i in range(len(ops)):
for j in range(len(ops[i])):
ops[i][j] = serializer.deserialize_paulisum(ops[i][j])
for i, index_tuple in enumerate(indices):
batch_index = index_tuple[0]
op_index = index_tuple[1]
# (#679) Just ignore empty programs.
if len(programs[batch_index].all_qubits()) == 0:
continue
circuit = cirq.resolve_parameters(programs[batch_index],
params[batch_index])
sampler = TFQPauliSumCollector(
circuit,
ops[batch_index][op_index],
samples_per_term=n_samples[batch_index][op_index])
asyncio.set_event_loop(asyncio.new_event_loop())
sampler.collect(simulator, concurrency=1)
result = sampler.estimated_energy().real
_pointwise_update_simple_np(x_np, batch_index, op_index, result)
def _sample_worker_func(indices, programs, params, n_samples):
    """Sample n_samples from programs[i] with params[i] placed in it."""
x_np = _convert_simple_view_to_np(INFO_DICT['arr'], np.int32,
INFO_DICT['shape'])
simulator = INFO_DICT['sim']
for i, index in enumerate(indices):
qubits = sorted(programs[i].all_qubits())
# (#679) Just ignore empty programs.
if len(qubits) == 0:
continue
state = simulator.simulate(programs[i], params[i])
samples = INFO_DICT['post_process'](state, len(qubits),
n_samples[i]).astype(np.int32)
_batch_update_simple_np(
x_np, index,
np.pad(samples, ((0, 0), (x_np.shape[2] - len(qubits), 0)),
'constant',
constant_values=-2))
def _validate_inputs(circuits, param_resolvers, simulator, sim_type):
"""Type check and sanity check inputs."""
if not isinstance(circuits, (list, tuple, np.ndarray)):
raise TypeError('circuits must be a list or array.'
' Given: {}'.format(type(circuits)))
if any(not isinstance(x, cirq.Circuit) for x in circuits):
raise TypeError('circuits must contain cirq.Circuit objects')
if not isinstance(param_resolvers, (list, tuple, np.ndarray)):
raise TypeError('param_resolvers must be a list or array.'
' Given: {}'.format(type(param_resolvers)))
if any(not isinstance(x, cirq.ParamResolver) for x in param_resolvers):
raise TypeError('param_resolvers must contain cirq.ParamResolvers.')
if not (len(circuits) == len(param_resolvers)):
        raise ValueError('Circuit batch size does not match resolver batch '
'size.')
if sim_type == 'analytic':
if not isinstance(simulator, cirq.SimulatesFinalState):
raise TypeError('For analytic operations only'
' cirq.SimulatesFinalState'
' is required. Given: {}'.format(type(simulator)))
elif sim_type == 'sample':
if not isinstance(simulator, cirq.Sampler):
raise TypeError('For sample based operations a cirq.Sampler is '
'required. Given: {}'.format(type(simulator)))
else:
raise ValueError('Invalid simulator type specified.')
def batch_calculate_state(circuits, param_resolvers, simulator):
    """Compute states with a given simulator using parallel processing.
Returns a NumPy array containing the final circuit state for each
`cirq.Circuit` in `circuits`, given that the corresponding
`cirq.ParamResolver` in `param_resolvers` was used to resolve any symbols
in it. If simulator is a `cirq.DensityMatrixSimulator` this final state will
    be a density matrix; if simulator is a `cirq.Simulator` this final state
    will be a wavefunction. More specifically, for a given `i`,
`batch_calculate_state` will use `param_resolvers[i]` to resolve the symbols
in `circuits[i]` and then place the final state in the return list at index
`i`.
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
simulator: Simulator object. Currently
supported are `cirq.DensityMatrixSimulator` and `cirq.Simulator`.
Returns:
`np.ndarray` containing the resulting state information. The array will
have dimensions: [len(circuits), <size of biggest state>] in the
case of `cirq.Simulator`. In the case of `cirq.DensityMatrixSimulator`
the shape is
[len(circuits), <size of biggest state>, <size of biggest state>]
"""
_validate_inputs(circuits, param_resolvers, simulator, 'analytic')
biggest_circuit = max(len(circuit.all_qubits()) for circuit in circuits)
if isinstance(simulator,
cirq.sim.density_matrix_simulator.DensityMatrixSimulator):
return_mem_shape = (len(circuits), 1 << biggest_circuit,
1 << biggest_circuit)
post_process = lambda x: x.final_density_matrix
elif isinstance(simulator, cirq.sim.sparse_simulator.Simulator):
return_mem_shape = (len(circuits), 1 << biggest_circuit)
post_process = lambda x: x.final_state
else:
raise TypeError('Simulator {} is not supported by '
'batch_calculate_state.'.format(type(simulator)))
shared_array = _make_complex_view(return_mem_shape, -2)
input_args = _prep_pool_input_args(range(len(circuits)), circuits,
param_resolvers)
with pathos.pools._ProcessPool(processes=None,
initializer=_setup_dict,
initargs=(shared_array, return_mem_shape,
simulator, post_process)) as pool:
pool.starmap(_state_worker_func, list(input_args))
return _convert_complex_view_to_result(shared_array, return_mem_shape)
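# --- Editor's illustrative sketch; not part of the original module. ---
# Minimal, hypothetical call to batch_calculate_state. It spins up a process
# pool, so it is meant to be run from a normal script, not at import time.
# Each returned row is padded with -2 up to the largest circuit in the batch.
def _example_batch_calculate_state():
    q = cirq.GridQubit(0, 0)
    circuits = [cirq.Circuit(cirq.X(q)), cirq.Circuit(cirq.H(q))]
    resolvers = [cirq.ParamResolver({}), cirq.ParamResolver({})]
    return batch_calculate_state(circuits, resolvers, cirq.Simulator())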
def batch_calculate_expectation(circuits, param_resolvers, ops, simulator):
"""Compute expectations from circuits using parallel processing.
Returns a `np.ndarray` containing the expectation values of `ops`
applied to a specific circuit in `circuits`, given that the
corresponding `cirq.ParamResolver` in `param_resolvers` was used to resolve
any symbols in the circuit. Specifically the returned array at index `i,j`
will be equal to the expectation value of `ops[i][j]` on `circuits[i]` with
`param_resolvers[i]` used to resolve any symbols in `circuits[i]`.
Expectation calculations will be carried out using the simulator object
(`cirq.DensityMatrixSimulator` and `cirq.Simulator` are currently supported)
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
ops: 2d Python `list` of `cirq.PauliSum` objects where `ops[i][j]` will
be used to calculate the expectation on `circuits[i]` for all `j`,
after `param_resolver[i]` is used to resolve any parameters
in the circuit.
simulator: Simulator object. Currently supported are
`cirq.DensityMatrixSimulator` and `cirq.Simulator`.
Returns:
`np.ndarray` containing the expectation values. Shape is:
[len(circuits), len(ops[0])]
"""
_validate_inputs(circuits, param_resolvers, simulator, 'analytic')
if not isinstance(ops, (list, tuple, np.ndarray)):
raise TypeError('ops must be a list or array.'
' Given: {}'.format(type(ops)))
if len(ops) != len(circuits):
raise ValueError('Shape of ops and circuits do not match.')
for sub_list in ops:
if not isinstance(sub_list, (list, tuple, np.ndarray)):
raise TypeError('elements of ops must be type list.')
for x in sub_list:
if not isinstance(x, cirq.PauliSum):
raise TypeError('ops must contain only cirq.PauliSum objects.'
' Given: {}'.format(type(x)))
return_mem_shape = (len(circuits), len(ops[0]))
if isinstance(simulator,
cirq.sim.density_matrix_simulator.DensityMatrixSimulator):
post_process = lambda op, state, order: sum(
x._expectation_from_density_matrix_no_validation(
state.final_density_matrix, order) for x in op).real
elif isinstance(simulator, cirq.sim.sparse_simulator.Simulator):
post_process = \
lambda op, state, order: op.expectation_from_wavefunction(
state.final_state, order).real
else:
raise TypeError('Simulator {} is not supported by '
'batch_calculate_expectation.'.format(type(simulator)))
shared_array = _make_simple_view(return_mem_shape, -2, np.float32, 'f')
# avoid mutating ops array
ops = np.copy(ops)
    # TODO (mbbrough): make cirq PauliSums picklable at some point?
for i in range(len(ops)):
for j in range(len(ops[i])):
ops[i][j] = serializer.serialize_paulisum(ops[i][j])
input_args = list(
_prep_pool_input_args(list(
itertools.product(range(len(circuits)), range(len(ops[0])))),
circuits,
param_resolvers,
ops,
slice_args=False))
with pathos.pools._ProcessPool(processes=None,
initializer=_setup_dict,
initargs=(shared_array, return_mem_shape,
simulator, post_process)) as pool:
pool.starmap(_analytical_expectation_worker_func, input_args)
return _convert_simple_view_to_result(shared_array, np.float32,
return_mem_shape)
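# --- Editor's illustrative sketch; not part of the original module. ---
# Minimal, hypothetical call to batch_calculate_expectation. ops is 2-D:
# ops[i][j] is measured on circuits[i], so the result has shape
# [len(circuits), len(ops[0])]. Uses only standard cirq constructors.
def _example_batch_calculate_expectation():
    q = cirq.GridQubit(0, 0)
    circuits = [cirq.Circuit(cirq.X(q) ** 0.5), cirq.Circuit(cirq.H(q))]
    resolvers = [cirq.ParamResolver({}), cirq.ParamResolver({})]
    z_sum = cirq.PauliSum.from_pauli_strings([cirq.PauliString({q: cirq.Z})])
    return batch_calculate_expectation(
        circuits, resolvers, [[z_sum], [z_sum]], cirq.Simulator())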
def batch_calculate_sampled_expectation(circuits, param_resolvers, ops,
n_samples, simulator):
"""Compute expectations from sampling circuits using parallel processing.
Returns a `np.ndarray` containing the expectation values of `ops`
applied to a specific circuit in `circuits`, given that the
corresponding `cirq.ParamResolver` in `param_resolvers` was used to resolve
any symbols in the circuit. Specifically the returned array at index `i,j`
will be equal to the expectation value of `ops[i][j]` on `circuits[i]` with
`param_resolvers[i]` used to resolve any symbols in `circuits[i]`.
Expectation estimations will be carried out using the simulator object
    (`cirq.DensityMatrixSimulator` and `cirq.Simulator` are currently
    supported). Expectations for ops[i][j] are estimated by drawing
    n_samples[i][j] samples.
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
ops: 2d Python `list` of `cirq.PauliSum` objects where `ops[i][j]` will
be used to calculate the expectation on `circuits[i]` for all `j`,
after `param_resolver[i]` is used to resolve any parameters
in the circuit.
n_samples: 2d Python `list` of `int`s where `n_samples[i][j]` is
equal to the number of samples to draw in each term of `ops[i][j]`
when estimating the expectation.
simulator: Simulator object. Currently supported are
`cirq.DensityMatrixSimulator` and `cirq.Simulator`.
Returns:
`np.ndarray` containing the expectation values. Shape is:
[len(circuits), len(ops[0])]
"""
_validate_inputs(circuits, param_resolvers, simulator, 'sample')
if not isinstance(ops, (list, tuple, np.ndarray)):
raise TypeError('ops must be a list or array.'
' Given: {}'.format(type(ops)))
if len(ops) != len(circuits):
raise ValueError('Shape of ops and circuits do not match.')
if len(n_samples) != len(circuits):
raise ValueError('Shape of n_samples does not match circuits.')
for sub_list in n_samples:
if not isinstance(sub_list, (list, tuple, np.ndarray)):
            raise TypeError('Elements of n_samples must be lists of ints.')
for x in sub_list:
if not isinstance(x, int):
raise TypeError('Non-integer value found in n_samples.')
if x <= 0:
raise ValueError('n_samples contains sample value <= 0.')
for sub_list in ops:
if not isinstance(sub_list, (list, tuple, np.ndarray)):
raise TypeError('elements of ops must be type list.')
for x in sub_list:
if not isinstance(x, cirq.PauliSum):
raise TypeError('ops must contain only cirq.PauliSum objects.'
' Given: {}'.format(type(x)))
return_mem_shape = (len(circuits), len(ops[0]))
shared_array = _make_simple_view(return_mem_shape, -2, np.float32, 'f')
# avoid mutating ops array
ops = np.copy(ops)
    # TODO (mbbrough): make cirq PauliSums picklable at some point?
for i in range(len(ops)):
for j in range(len(ops[i])):
ops[i][j] = serializer.serialize_paulisum(ops[i][j])
input_args = list(
_prep_pool_input_args(list(
itertools.product(range(len(circuits)), range(len(ops[0])))),
circuits,
param_resolvers,
ops,
n_samples,
slice_args=False))
with pathos.pools._ProcessPool(processes=None,
initializer=_setup_dict,
initargs=(shared_array, return_mem_shape,
simulator, None)) as pool:
pool.starmap(_sample_expectation_worker_func, input_args)
return _convert_simple_view_to_result(shared_array, np.float32,
return_mem_shape)
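# --- Editor's illustrative sketch; not part of the original module. ---
# Same call shape as batch_calculate_expectation, plus a parallel 2-D list of
# sample counts: n_samples[i][j] shots are used to estimate ops[i][j].
def _example_batch_calculate_sampled_expectation():
    q = cirq.GridQubit(0, 0)
    circuits = [cirq.Circuit(cirq.X(q) ** 0.5)]
    resolvers = [cirq.ParamResolver({})]
    z_sum = cirq.PauliSum.from_pauli_strings([cirq.PauliString({q: cirq.Z})])
    return batch_calculate_sampled_expectation(
        circuits, resolvers, [[z_sum]], [[1000]], cirq.Simulator())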
def batch_sample(circuits, param_resolvers, n_samples, simulator):
"""Sample from circuits using parallel processing.
Returns a `np.ndarray` containing n_samples samples from all the circuits in
circuits given that the corresponding `cirq.ParamResolver` in
`param_resolvers` was used to resolve any symbols. Specifically the
returned array at index `i,j` will correspond to a `np.ndarray` of
booleans representing bitstring `j` that was sampled from `circuits[i]`.
Samples are drawn using the provided simulator object (Currently supported
are `cirq.DensityMatrixSimulator` and `cirq.Simulator`).
Note: In order to keep numpy shape consistent, smaller circuits will
have sample bitstrings padded with -2 on "qubits that don't exist
in the circuit".
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
n_samples: `int` describing number of samples to draw from each
circuit.
simulator: Simulator object. Currently
supported are `cirq.DensityMatrixSimulator` and `cirq.Simulator`.
Returns:
`np.ndarray` containing the samples with invalid qubits blanked out.
        Its shape is
[len(circuits), n_samples, <# qubits in largest circuit>].
        Circuits that use fewer qubits than the largest circuit have the
        unused qubit positions in their bitstrings mapped to -2.
"""
_validate_inputs(circuits, param_resolvers, simulator, 'sample')
if not isinstance(n_samples, int):
raise TypeError('n_samples must be an int.'
'Given: {}'.format(type(n_samples)))
if n_samples <= 0:
raise ValueError('n_samples must be > 0.')
biggest_circuit = max(len(circuit.all_qubits()) for circuit in circuits)
return_mem_shape = (len(circuits), n_samples, biggest_circuit)
shared_array = _make_simple_view(return_mem_shape, -2, np.int32, 'i')
if isinstance(simulator,
cirq.sim.density_matrix_simulator.DensityMatrixSimulator):
post_process = lambda state, size, n_samples: \
cirq.sample_density_matrix(
state.final_density_matrix, [i for i in range(size)],
repetitions=n_samples)
elif isinstance(simulator, cirq.sim.sparse_simulator.Simulator):
post_process = lambda state, size, n_samples: cirq.sample_state_vector(
state.final_state, list(range(size)), repetitions=n_samples)
else:
raise TypeError('Simulator {} is not supported by batch_sample.'.format(
type(simulator)))
input_args = list(
_prep_pool_input_args(range(len(circuits)), circuits, param_resolvers,
[n_samples] * len(circuits)))
with pathos.pools._ProcessPool(processes=None,
initializer=_setup_dict,
initargs=(shared_array, return_mem_shape,
simulator, post_process)) as pool:
pool.starmap(_sample_worker_func, input_args)
return _convert_simple_view_to_result(shared_array, np.int32,
return_mem_shape)
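# --- Editor's illustrative sketch; not part of the original module. ---
# Minimal, hypothetical call to batch_sample: ten bitstrings per circuit;
# qubit positions absent from a smaller circuit come back as -2.
def _example_batch_sample():
    a, b = cirq.GridQubit.rect(1, 2)
    circuits = [cirq.Circuit(cirq.X(a), cirq.X(b)), cirq.Circuit(cirq.X(a))]
    resolvers = [cirq.ParamResolver({}), cirq.ParamResolver({})]
    return batch_sample(circuits, resolvers, 10, cirq.Simulator())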
| 43.105754 | 80 | 0.636902 |
33be441ee8dbf83c1b0037675e803015ae9b8cf1 | 929 | py | Python | tests/reduction_complex.py | manopapad/legate.numpy | 896f4fd9b32db445da6cdabf7b78d523fca96936 | [
"Apache-2.0"
] | null | null | null | tests/reduction_complex.py | manopapad/legate.numpy | 896f4fd9b32db445da6cdabf7b78d523fca96936 | [
"Apache-2.0"
] | null | null | null | tests/reduction_complex.py | manopapad/legate.numpy | 896f4fd9b32db445da6cdabf7b78d523fca96936 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import legate.numpy as lg
def test():
numpyX = np.array([1 + 4j, 2 + 5j, 3 + 6j], np.complex64)
x = lg.array(numpyX)
z = lg.sum(x)
assert lg.all(lg.abs(z - np.sum(numpyX)) < 1e-5)
z = lg.prod(x)
assert lg.all(lg.abs(z - np.prod(numpyX)) < 1e-5)
return
if __name__ == "__main__":
test()
| 26.542857 | 74 | 0.691066 |
8b90ee4be04365222424a1a9afaa810bceb434b3 | 5,510 | py | Python | my_account/my_account/doctype/sync_server_settings.py | bobzz-zone/saas_my_account | 0349bf14714bd070ec003dd96b3f60878af1b9b1 | [
"MIT"
] | null | null | null | my_account/my_account/doctype/sync_server_settings.py | bobzz-zone/saas_my_account | 0349bf14714bd070ec003dd96b3f60878af1b9b1 | [
"MIT"
] | null | null | null | my_account/my_account/doctype/sync_server_settings.py | bobzz-zone/saas_my_account | 0349bf14714bd070ec003dd96b3f60878af1b9b1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, erpx and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from my_account.my_account.doctype.frappeclient import FrappeClient
import json
import os
import requests
import subprocess
from frappe.utils.background_jobs import enqueue
from frappe.utils import get_site_name
class SyncServerSettings(Document):
pass
@frappe.whitelist()
def enqueue_sync():
enqueue("my_account.my_account.doctype.sync_server_settings.sync_invoice_to_sales_invoice")
@frappe.whitelist()
def sync_invoice_to_sales_invoice():
	# NB: values that must be changed to match the target site are marked below with a "needs to be changed" comment
	# needs to be changed (site-specific)
clientroot = FrappeClient("https://demo.antusias.id/", "Administrator", "D4s@tm21")
# query invoice
get_invoice = frappe.db.sql("""
SELECT
i.`name`,
i.`subdomain`,
i.`total_user`,
i.`status`,
i.`posting_date`,
i.`total`,
i.`discount`,
i.`total_after_discount`,
i.`tax_package`,
i.`grand_total`,
ifnull(i.`sales_partner`,"")
FROM `tabInvoice` i
WHERE i.`status` = "Paid"
AND i.`sync_domain` = 0
AND i.`subdomain` IS NOT NULL
ORDER BY i.`subdomain`, i.`posting_date`
""", as_list=1)
if get_invoice :
for i in get_invoice :
invoice_name = i[0]
subdomain = i[1]
total_user = i[2]
posting_date = i[4]
total = i[5]
if i[6] < 0 :
discount = i[6] * -1
else :
discount = i[6]
total_after_discount = i[7]
tax_package = i[8]
grand_total = i[9]
sales_partner = i[10]
			# check whether the customer already exists
customer_name = "subdomain-"+str(subdomain)
customer_tujuan = clientroot.get_value("Customer", "name", {"name":customer_name})
if customer_tujuan :
count = 0
else :
# create customer
doc_customer = {"doctype":"Customer"}
doc_customer["customer_name"] = customer_name
				# needs to be changed (site-specific)
doc_customer["customer_type"] = "Individual"
				# needs to be changed (site-specific)
doc_customer["customer_group"] = "All Customer Groups"
				# needs to be changed (site-specific)
doc_customer["territory"] = "All Territories"
clientroot.insert(doc_customer)
# insert sales invoice
doc_invoice = {
"doctype":"Sales Invoice",
"customer":customer_name,
"company":"PT. Demo Antusias",
"set_posting_time":"1",
"posting_date":str(posting_date),
"is_pos":"1",
"update_stock":"0",
"items":[{
"parenttype" : "Sales Invoice",
"parentfield" : "items",
"item_code": "Package User",
"item_name": "Package User",
"description": "Package User",
"stock_uom": "User",
"uom": "User",
"conversion_factor": 1,
"rate": str(int(total) / int(total_user)),
"amount" : str(total),
"qty" : str(total_user)
}],
"apply_discount_on":"Net Total",
"discount_amount":discount,
"taxes":[{
"parenttype" : "Sales Invoice",
"parentfield" : "taxes",
"charge_type": "On Net Total",
"account_head": "2103.0100 - PPN Keluaran - PDA",
"cost_center": "Main - PDA",
"description": "PPN",
"rate": 10
}],
"payments":[{
"parenttype" : "Sales Invoice",
"parentfield" : "payments",
"default": 1,
"mode_of_payment": "Cash",
"amount": str(grand_total)
}],
"sales_partner":sales_partner,
"remarks":invoice_name
}
# doc_invoice["customer"] = customer_name
			# # needs to be changed (site-specific)
# doc_invoice["company"] = "PT. Demo Antusias"
# doc_invoice["set_posting_time"] = "1"
# doc_invoice["posting_date"] = posting_date
# doc_invoice["is_pos"] = "1"
# doc_invoice["update_stock"] = "0"
# doc_invoice["items"] = [{
# "item_code": "Package User",
# "item_name": "Package User",
# "description": "Package User",
# "stock_uom": "User",
# "uom": "User",
# "conversion_factor": 1,
# "rate": int(total) / int(total_user),
# "amount" : int(total),
# "qty" : int(total_user)
# }]
# # doc_invoice["total"] = total
# # doc_invoice["net_total"] = total
# doc_invoice["apply_discount_on"] = "Net Total"
# doc_invoice["discount_amount"] = discount
# doc_invoice["taxes"] = [{
# "charge_type": "On Net Total",
# "account_head": "2103.0100 - PPN Keluaran - PDA",
# "cost_center": "Main - PDA",
# "description": "PPN",
# "rate": 10
# }]
# doc_invoice["payments"] = [{
# "default": 1,
# "mode_of_payment": "Cash",
# "amount": int(grand_total)
# }]
# doc_invoice["sales_partner"] = sales_partner
# doc_invoice["remarks"] = invoice_name
# frappe.throw(json.dumps( doc_invoice ))
result = clientroot.insert(doc_invoice)
sinv_doc = clientroot.get_doc("Sales Invoice", result["name"])
clientroot.submit(sinv_doc)
# frappe.throw(result["name"])
frappe.db.sql(""" UPDATE `tabInvoice` SET sync_domain = 1 WHERE NAME = "{}" """.format(invoice_name))
frappe.db.commit()
| 26.878049 | 105 | 0.574592 |
1fb9470af41162f99511873fac84a5f435fa264f | 1,198 | py | Python | utils/puzzle_reader.py | nitekat1124/advent-of-code-2017 | a18884137fa5354ebe537b8730930b9a59c613af | [
"WTFPL"
] | null | null | null | utils/puzzle_reader.py | nitekat1124/advent-of-code-2017 | a18884137fa5354ebe537b8730930b9a59c613af | [
"WTFPL"
] | null | null | null | utils/puzzle_reader.py | nitekat1124/advent-of-code-2017 | a18884137fa5354ebe537b8730930b9a59c613af | [
"WTFPL"
] | null | null | null | import os, sys, glob, re
class PuzzleReader:
@staticmethod
def get_puzzle_input(day_num, is_raw):
return [line.strip("\n") if is_raw else line.strip() for line in open(f"{PuzzleReader.get_path()}/data/day{day_num:02d}/puzzle_input.txt", "r").readlines()]
@staticmethod
def get_test_input(day_num, is_raw):
inputs = []
for name in sorted(glob.glob(f"{PuzzleReader.get_path()}/data/day{day_num:02d}/*")):
if len(re.findall(r"^test_\d+_input.txt$", os.path.basename(name))):
inputs += [[line.strip("\n") if is_raw else line.strip() for line in open(name, "r").readlines()]]
return inputs
@staticmethod
def get_test_result(day_num, part_num):
results = []
for name in sorted(glob.glob(f"{PuzzleReader.get_path()}/data/day{day_num:02d}/*")):
if len(re.findall(r"^test_\d+_part" + str(part_num) + "_result.txt$", os.path.basename(name))):
results += [[line.strip() for line in open(name, "r").readlines()]]
return results
@staticmethod
def get_path():
return path if os.path.isdir(path := os.path.realpath(sys.argv[0])) else os.path.dirname(path)
| 42.785714 | 164 | 0.626878 |
1709a46fb50f853ba289e10d94e65b695cf17a67 | 3,197 | py | Python | src/timelord/timelord_api.py | DONG-Jason/chia-blockchain | 27b28d62f6b315e45bc00231e007c775f07a414a | [
"Apache-2.0"
] | null | null | null | src/timelord/timelord_api.py | DONG-Jason/chia-blockchain | 27b28d62f6b315e45bc00231e007c775f07a414a | [
"Apache-2.0"
] | null | null | null | src/timelord/timelord_api.py | DONG-Jason/chia-blockchain | 27b28d62f6b315e45bc00231e007c775f07a414a | [
"Apache-2.0"
] | null | null | null | from typing import Callable, Optional
import logging
from src.protocols import timelord_protocol
from src.timelord.timelord import Timelord, iters_from_sub_block, Chain, IterationType
from src.util.api_decorators import api_request
from src.util.ints import uint64
log = logging.getLogger(__name__)
class TimelordAPI:
timelord: Timelord
def __init__(self, timelord):
self.timelord = timelord
def _set_state_changed_callback(self, callback: Callable):
pass
@api_request
async def new_peak(self, new_peak: timelord_protocol.NewPeak):
async with self.timelord.lock:
if new_peak.reward_chain_sub_block.weight > self.timelord.last_state.get_weight():
log.info("Not skipping peak, don't have. Maybe we are not the fastest timelord")
log.info(
f"New peak: height: {new_peak.reward_chain_sub_block.sub_block_height} weight: "
f"{new_peak.reward_chain_sub_block.weight} "
)
self.timelord.new_peak = new_peak
elif (
self.timelord.last_state.peak is not None
and self.timelord.last_state.peak.reward_chain_sub_block == new_peak.reward_chain_sub_block
):
log.info("Skipping peak, already have.")
return
else:
log.warning("Sub-block that we don't have, changing to it.")
self.timelord.new_peak = new_peak
self.timelord.new_subslot_end = None
@api_request
async def new_unfinished_sub_block(self, new_unfinished_subblock: timelord_protocol.NewUnfinishedSubBlock):
async with self.timelord.lock:
try:
sp_iters, ip_iters = iters_from_sub_block(
self.timelord.constants,
new_unfinished_subblock.reward_chain_sub_block,
self.timelord.last_state.get_sub_slot_iters(),
self.timelord.last_state.get_difficulty(),
)
except Exception:
return
last_ip_iters = self.timelord.last_state.get_last_ip()
if sp_iters > ip_iters:
self.timelord.overflow_blocks.append(new_unfinished_subblock)
elif ip_iters > last_ip_iters:
new_block_iters: Optional[uint64] = self.timelord._can_infuse_unfinished_block(new_unfinished_subblock)
if new_block_iters:
self.timelord.unfinished_blocks.append(new_unfinished_subblock)
for chain in [Chain.REWARD_CHAIN, Chain.CHALLENGE_CHAIN]:
self.timelord.iters_to_submit[chain].append(new_block_iters)
if (
self.timelord.last_state.get_deficit()
< self.timelord.constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK
):
self.timelord.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
self.timelord.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
self.timelord.total_unfinished += 1
| 45.671429 | 119 | 0.632781 |
fb93e366da3a83762556253ce8f6f9b28a343fa6 | 10,842 | py | Python | integration/airflow/tests/extractors/test_bigquery_extractor.py | Arnaud-Nauwynck/OpenLineage | 18e711e1c7187a85bc7c98790466d4593c8306ed | [
"Apache-2.0"
] | null | null | null | integration/airflow/tests/extractors/test_bigquery_extractor.py | Arnaud-Nauwynck/OpenLineage | 18e711e1c7187a85bc7c98790466d4593c8306ed | [
"Apache-2.0"
] | null | null | null | integration/airflow/tests/extractors/test_bigquery_extractor.py | Arnaud-Nauwynck/OpenLineage | 18e711e1c7187a85bc7c98790466d4593c8306ed | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
import unittest
from datetime import datetime
import mock
import pytz
from airflow.utils import timezone
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.models import TaskInstance, DAG
from airflow.utils.state import State
from openlineage.airflow.extractors.bigquery_extractor import BigQueryExtractor
from openlineage.client.facet import OutputStatisticsOutputDatasetFacet
from openlineage.common.provider.bigquery import BigQueryJobRunFacet, \
BigQueryStatisticsDatasetFacet, BigQueryErrorRunFacet
from openlineage.common.utils import get_from_nullable_chain
log = logging.getLogger(__name__)
class TestBigQueryExtractorE2E(unittest.TestCase):
def setUp(self):
log.debug("TestBigQueryExtractorE2E.setup(): ")
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
@mock.patch('google.cloud.bigquery.Client')
def test_extract(self, mock_client, mock_hook):
log.info("test_extractor")
job_details = self.read_file_json(
"tests/extractors/job_details.json")
table_details = self.read_dataset_json(
"tests/extractors/table_details.json")
out_details = self.read_dataset_json(
"tests/extractors/out_table_details.json")
bq_job_id = "foo.bq.job_id"
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_query.return_value = bq_job_id
mock_client.return_value \
.get_job.return_value \
._properties = job_details
mock_client.return_value \
.get_table.side_effect = [table_details, out_details]
# To make sure hasattr "sees" close and calls it
mock_client.return_value.close.return_value
mock.seal(mock_hook)
mock.seal(mock_client)
dag = DAG(dag_id='TestBigQueryExtractorE2E')
task = BigQueryOperator(
sql='select first_name, last_name from customers;',
task_id="task_id",
project_id="project_id",
dag_id="dag_id",
dag=dag,
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0)
)
task_instance = TaskInstance(
task=task,
execution_date=datetime.utcnow().replace(tzinfo=pytz.utc))
bq_extractor = BigQueryExtractor(task)
task_meta_extract = bq_extractor.extract()
assert task_meta_extract is None
task_instance.run()
task_meta = bq_extractor.extract_on_complete(task_instance)
mock_client.return_value \
.get_job.assert_called_once_with(job_id=bq_job_id)
assert task_meta.inputs is not None
assert len(task_meta.inputs) == 1
assert task_meta.inputs[0].name == \
'bigquery-public-data.usa_names.usa_1910_2013'
assert task_meta.inputs[0].facets['schema'].fields is not None
assert task_meta.inputs[0].facets['dataSource'].name == 'bigquery'
assert task_meta.inputs[0].facets['dataSource'].uri == 'bigquery'
assert len(task_meta.inputs[0].facets['schema'].fields) == 5
assert task_meta.outputs is not None
assert len(task_meta.outputs) == 1
assert task_meta.outputs[0].facets['schema'].fields is not None
assert len(task_meta.outputs[0].facets['schema'].fields) == 2
assert task_meta.outputs[0].name == \
'bq-airflow-openlineage.new_dataset.output_table'
assert BigQueryStatisticsDatasetFacet(
rowCount=20,
size=321
) == task_meta.outputs[0].facets['stats']
assert OutputStatisticsOutputDatasetFacet(
rowCount=20,
size=321
) == task_meta.outputs[0].outputFacets['outputStatistics']
assert len(task_meta.run_facets) == 1
assert BigQueryJobRunFacet(
cached=False,
billedBytes=111149056,
properties=json.dumps(job_details)
) == task_meta.run_facets['bigQuery_job']
mock_client.return_value.close.assert_called()
def read_dataset_json(self, file):
client_mock = self.Client_mock()
client_mock._properties = self.read_file_json(file)
return client_mock
class Client_mock:
_properties = None
def read_file_json(self, file):
f = open(
file=file,
mode="r"
)
details = json.loads(f.read())
f.close()
return details
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
@mock.patch('google.cloud.bigquery.Client')
def test_extract_cached(self, mock_client, mock_hook):
bq_job_id = "foo.bq.job_id"
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_query.return_value = bq_job_id
job_details = self.read_file_json(
"tests/extractors/cached_job_details.json"
)
mock_client.return_value.get_job.return_value._properties = job_details
# To make sure hasattr "sees" close and calls it
mock_client.return_value.close.return_value
mock.seal(mock_hook)
mock.seal(mock_client)
dag = DAG(dag_id='TestBigQueryExtractorE2E')
task = BigQueryOperator(
sql='select first_name, last_name from customers;',
task_id="task_id",
project_id="project_id",
dag_id="dag_id",
dag=dag,
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0)
)
task_instance = TaskInstance(
task=task,
execution_date=datetime.utcnow().replace(tzinfo=pytz.utc))
bq_extractor = BigQueryExtractor(task)
tasks_meta_extract = bq_extractor.extract()
assert tasks_meta_extract is None
task_instance.run()
task_meta = bq_extractor.extract_on_complete(task_instance)
assert task_meta.inputs is not None
assert task_meta.outputs is not None
assert len(task_meta.run_facets) == 1
assert task_meta.run_facets['bigQuery_job'] \
== BigQueryJobRunFacet(cached=True)
@mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
@mock.patch('google.cloud.bigquery.Client')
def test_extract_error(self, mock_client, mock_hook):
bq_job_id = "foo.bq.job_id"
mock_hook.return_value \
.get_conn.return_value \
.cursor.return_value \
.run_query.return_value = bq_job_id
mock_client.return_value \
.get_job.side_effects = [Exception("bq error")]
# To make sure hasattr "sees" close and calls it
mock_client.return_value.close.return_value
mock.seal(mock_hook)
mock.seal(mock_client)
dag = DAG(dag_id='TestBigQueryExtractorE2E')
task = BigQueryOperator(
sql='select first_name, last_name from customers;',
task_id="task_id",
project_id="project_id",
dag_id="dag_id",
dag=dag,
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0)
)
task_instance = TaskInstance(
task=task,
execution_date=datetime.utcnow().replace(tzinfo=pytz.utc))
bq_extractor = BigQueryExtractor(task)
tasks_meta_extract = bq_extractor.extract()
assert tasks_meta_extract is None
task_instance.run()
task_meta = bq_extractor.extract_on_complete(task_instance)
assert task_meta.run_facets['bigQuery_error'] == BigQueryErrorRunFacet(
clientError=mock.ANY
)
mock_client.return_value.get_job.assert_called_once_with(job_id=bq_job_id)
assert task_meta.inputs is not None
assert len(task_meta.inputs) == 0
assert task_meta.outputs is not None
assert len(task_meta.outputs) == 0
mock_client.return_value.close.assert_called()
class TestBigQueryExtractor(unittest.TestCase):
def setUp(self):
log.debug("TestBigQueryExtractor.setup(): ")
self.task = TestBigQueryExtractor._get_bigquery_task()
self.ti = TestBigQueryExtractor._get_ti(task=self.task)
self.bq_extractor = BigQueryExtractor(operator=self.task)
def test_extract(self):
log.info("test_extractor")
tasks_meta_extract = BigQueryExtractor(self.task).extract()
assert tasks_meta_extract is None
@mock.patch("airflow.models.TaskInstance.xcom_pull")
def test_get_xcom_bigquery_job_id(self, mock_xcom_pull):
self.bq_extractor._get_xcom_bigquery_job_id(self.ti)
mock_xcom_pull.assert_called_once_with(
task_ids=self.ti.task_id, key='job_id')
def test_nullable_chain_fails(self):
x = {"first": {"second": {}}}
assert get_from_nullable_chain(x, ['first', 'second', 'third']) is None
def test_nullable_chain_works(self):
x = {"first": {"second": {"third": 42}}}
assert get_from_nullable_chain(x, ['first', 'second', 'third']) == 42
x = {"first": {"second": {"third": 42, "fourth": {"empty": 56}}}}
assert get_from_nullable_chain(x, ['first', 'second', 'third']) == 42
@staticmethod
def _get_ti(task):
task_instance = TaskInstance(
task=task,
execution_date=datetime.utcnow().replace(tzinfo=pytz.utc),
state=State.RUNNING)
task_instance.job_id = random.randrange(10000)
return task_instance
@staticmethod
def _get_async_job(properties):
# BigQuery Job
class AsyncJob:
_properties = None
def __init__(self, _properties):
self._properties = _properties
return AsyncJob(_properties=properties)
@staticmethod
def _get_bigquery_task():
dag = DAG(dag_id='TestBigQueryExtractorE2E')
task = BigQueryOperator(
sql='select first_name, last_name from customers;',
task_id="task_id",
project_id="project_id",
dag_id="dag_id",
dag=dag,
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0)
)
return task
if __name__ == '__main__':
unittest.main()
| 33.987461 | 82 | 0.655691 |
bbe1c84be4cb3822d0c5c7e3e38b027fe248e438 | 6,438 | py | Python | tools/python/runfiles/runfiles_test.py | JoesaDmercado/bazel | 89b8f153d5ce145bbde177233eef02f4d16c2ab5 | [
"Apache-2.0"
] | 1 | 2018-04-12T15:36:03.000Z | 2018-04-12T15:36:03.000Z | tools/python/runfiles/runfiles_test.py | Corroler/bazel | 073ea095a6c6a826ccdbbce1b213de47115e701a | [
"Apache-2.0"
] | 1 | 2018-06-07T01:41:04.000Z | 2018-07-25T06:43:27.000Z | tools/python/runfiles/runfiles_test.py | Corroler/bazel | 073ea095a6c6a826ccdbbce1b213de47115e701a | [
"Apache-2.0"
] | null | null | null | # pylint: disable=g-bad-file-header
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from tools.python.runfiles import runfiles
class RunfilesTest(unittest.TestCase):
# """Unit tests for `runfiles.Runfiles`."""
def testRlocationArgumentValidation(self):
r = runfiles.Create({"RUNFILES_DIR": "whatever"})
self.assertRaises(ValueError, lambda: r.Rlocation(None))
self.assertRaises(ValueError, lambda: r.Rlocation(""))
self.assertRaises(TypeError, lambda: r.Rlocation(1))
self.assertRaisesRegexp(ValueError, "contains uplevel",
lambda: r.Rlocation("foo/.."))
self.assertRaisesRegexp(ValueError, "is absolute without a drive letter",
lambda: r.Rlocation("\\foo"))
def testCreatesManifestBasedRunfiles(self):
with _MockFile(contents=["a/b c/d"]) as mf:
r = runfiles.Create({
"RUNFILES_MANIFEST_FILE": mf.Path(),
"RUNFILES_DIR": "ignored when RUNFILES_MANIFEST_FILE has a value",
"TEST_SRCDIR": "always ignored",
})
self.assertEqual(r.Rlocation("a/b"), "c/d")
self.assertIsNone(r.Rlocation("foo"))
def testManifestBasedRunfilesEnvVars(self):
with _MockFile(name="MANIFEST") as mf:
r = runfiles.Create({
"RUNFILES_MANIFEST_FILE": mf.Path(),
"TEST_SRCDIR": "always ignored",
})
self.assertDictEqual(
r.EnvVars(), {
"RUNFILES_MANIFEST_FILE": mf.Path(),
"RUNFILES_DIR": mf.Path()[:-len("/MANIFEST")],
"JAVA_RUNFILES": mf.Path()[:-len("/MANIFEST")],
})
with _MockFile(name="foo.runfiles_manifest") as mf:
r = runfiles.Create({
"RUNFILES_MANIFEST_FILE": mf.Path(),
"TEST_SRCDIR": "always ignored",
})
self.assertDictEqual(
r.EnvVars(), {
"RUNFILES_MANIFEST_FILE":
mf.Path(),
"RUNFILES_DIR": (
mf.Path()[:-len("foo.runfiles_manifest")] + "foo.runfiles"),
"JAVA_RUNFILES": (
mf.Path()[:-len("foo.runfiles_manifest")] + "foo.runfiles"),
})
with _MockFile(name="x_manifest") as mf:
r = runfiles.Create({
"RUNFILES_MANIFEST_FILE": mf.Path(),
"TEST_SRCDIR": "always ignored",
})
self.assertDictEqual(
r.EnvVars(), {
"RUNFILES_MANIFEST_FILE": mf.Path(),
"RUNFILES_DIR": "",
"JAVA_RUNFILES": "",
})
def testCreatesDirectoryBasedRunfiles(self):
r = runfiles.Create({
"RUNFILES_DIR": "runfiles/dir",
"TEST_SRCDIR": "always ignored",
})
self.assertEqual(r.Rlocation("a/b"), "runfiles/dir/a/b")
self.assertEqual(r.Rlocation("foo"), "runfiles/dir/foo")
def testDirectoryBasedRunfilesEnvVars(self):
r = runfiles.Create({
"RUNFILES_DIR": "runfiles/dir",
"TEST_SRCDIR": "always ignored",
})
self.assertDictEqual(r.EnvVars(), {
"RUNFILES_DIR": "runfiles/dir",
"JAVA_RUNFILES": "runfiles/dir",
})
def testFailsToCreateManifestBasedBecauseManifestDoesNotExist(self):
def _Run():
runfiles.Create({"RUNFILES_MANIFEST_FILE": "non-existing path"})
self.assertRaisesRegexp(IOError, "non-existing path", _Run)
def testFailsToCreateAnyRunfilesBecauseEnvvarsAreNotDefined(self):
with _MockFile(contents=["a b"]) as mf:
runfiles.Create({
"RUNFILES_MANIFEST_FILE": mf.Path(),
"RUNFILES_DIR": "whatever",
"TEST_SRCDIR": "always ignored",
})
runfiles.Create({
"RUNFILES_DIR": "whatever",
"TEST_SRCDIR": "always ignored",
})
self.assertIsNone(runfiles.Create({"TEST_SRCDIR": "always ignored"}))
self.assertIsNone(runfiles.Create({"FOO": "bar"}))
def testManifestBasedRlocation(self):
with _MockFile(contents=[
"Foo/runfile1", "Foo/runfile2 C:/Actual Path\\runfile2",
"Foo/Bar/runfile3 D:\\the path\\run file 3.txt"
]) as mf:
r = runfiles.CreateManifestBased(mf.Path())
self.assertEqual(r.Rlocation("Foo/runfile1"), "Foo/runfile1")
self.assertEqual(r.Rlocation("Foo/runfile2"), "C:/Actual Path\\runfile2")
self.assertEqual(
r.Rlocation("Foo/Bar/runfile3"), "D:\\the path\\run file 3.txt")
self.assertIsNone(r.Rlocation("unknown"))
if RunfilesTest.IsWindows():
self.assertEqual(r.Rlocation("c:/foo"), "c:/foo")
self.assertEqual(r.Rlocation("c:\\foo"), "c:\\foo")
else:
self.assertEqual(r.Rlocation("/foo"), "/foo")
def testDirectoryBasedRlocation(self):
# The _DirectoryBased strategy simply joins the runfiles directory and the
# runfile's path on a "/". This strategy does not perform any normalization,
# nor does it check that the path exists.
r = runfiles.CreateDirectoryBased("foo/bar baz//qux/")
self.assertEqual(r.Rlocation("arg"), "foo/bar baz//qux/arg")
if RunfilesTest.IsWindows():
self.assertEqual(r.Rlocation("c:/foo"), "c:/foo")
self.assertEqual(r.Rlocation("c:\\foo"), "c:\\foo")
else:
self.assertEqual(r.Rlocation("/foo"), "/foo")
@staticmethod
def IsWindows():
return os.name == "nt"
class _MockFile(object):
def __init__(self, name=None, contents=None):
self._contents = contents or []
self._name = name or "x"
self._path = None
def __enter__(self):
tmpdir = os.environ.get("TEST_TMPDIR")
self._path = os.path.join(tempfile.mkdtemp(dir=tmpdir), self._name)
with open(self._path, "wt") as f:
f.writelines(l + "\n" for l in self._contents)
return self
def __exit__(self, exc_type, exc_value, traceback):
os.remove(self._path)
os.rmdir(os.path.dirname(self._path))
def Path(self):
return self._path
if __name__ == "__main__":
unittest.main()
| 35.180328 | 80 | 0.635446 |
32725e2c62b53ce55413466d07d5a35efa332fa2 | 45 | py | Python | punch_version.py | jnnr/oemof-tabular | ab58d8c3035b6e97d9d45169832745de11e5bb36 | [
"BSD-3-Clause"
] | 2 | 2019-12-09T17:34:31.000Z | 2022-02-04T12:55:15.000Z | punch_version.py | jnnr/oemof-tabular | ab58d8c3035b6e97d9d45169832745de11e5bb36 | [
"BSD-3-Clause"
] | 28 | 2018-11-24T16:56:55.000Z | 2022-03-25T12:19:41.000Z | punch_version.py | jnnr/oemof-tabular | ab58d8c3035b6e97d9d45169832745de11e5bb36 | [
"BSD-3-Clause"
] | 7 | 2018-12-19T13:42:52.000Z | 2021-11-21T18:43:45.000Z | major = 0
minor = 0
patch = 2
status = 'dev'
| 9 | 14 | 0.6 |
63910c516521486868a40c1db4e73b647262358d | 1,135 | py | Python | sdis/contrib/sites/migrations/0002_set_site_domain_and_name.py | dbca-wa/sdis4 | a40f6025bc6fc90dde251d65567a0c94f1f96967 | [
"MIT"
] | 1 | 2018-03-12T01:50:55.000Z | 2018-03-12T01:50:55.000Z | sdis/contrib/sites/migrations/0002_set_site_domain_and_name.py | dbca-wa/sdis4 | a40f6025bc6fc90dde251d65567a0c94f1f96967 | [
"MIT"
] | 3 | 2021-06-10T20:23:45.000Z | 2022-01-13T00:46:30.000Z | sdis/contrib/sites/migrations/0002_set_site_domain_and_name.py | dbca-wa/sdis4 | a40f6025bc6fc90dde251d65567a0c94f1f96967 | [
"MIT"
] | null | null | null | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.org/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "sdis.dpaw.wa.gov.au",
"name": "sdis"
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| 24.148936 | 130 | 0.639648 |
1dc970452d084af6100fafdb8b8b8b883ada1ffa | 7,322 | py | Python | trestle/core/jinja.py | jayhawk87/compliance-trestle | d0262826f30e0c7f89f8a3551b93142669fa2c66 | [
"Apache-2.0"
] | null | null | null | trestle/core/jinja.py | jayhawk87/compliance-trestle | d0262826f30e0c7f89f8a3551b93142669fa2c66 | [
"Apache-2.0"
] | null | null | null | trestle/core/jinja.py | jayhawk87/compliance-trestle | d0262826f30e0c7f89f8a3551b93142669fa2c66 | [
"Apache-2.0"
] | null | null | null | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trestle utilities to customize jinja2 rendering."""
import logging
import frontmatter
from jinja2 import lexer, nodes
from jinja2.environment import Environment
from jinja2.ext import Extension
from jinja2.parser import Parser
from trestle.core import err
from trestle.core.markdown import markdown_node
logger = logging.getLogger(__name__)
def adjust_heading_level(input_md: str, expected: int) -> str:
"""Adjust the header level of a markdown string such that the most significant header matches the expected #'s."""
output_md = input_md
mdn = markdown_node.MarkdownNode.build_tree_from_markdown(input_md.split('\n'))
if mdn.subnodes:
mdn_top_heading = mdn.subnodes[0].get_node_header_lvl()
delta = int(expected) - mdn_top_heading
if not delta == 0:
mdn.change_header_level_by(delta)
output_md = mdn.content.raw_text
return output_md
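# --- Editor's illustrative sketch; not part of the original module. ---
# For example, markdown whose top heading is at level 2 and is adjusted to an
# expected level of 1 has every heading promoted by one ('##' -> '#'),
# assuming MarkdownNode accepts this small snippet.
def _example_adjust_heading_level():
    return adjust_heading_level('## Title\n### Sub-section\nBody text.', 1)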
class TrestleJinjaExtension(Extension):
"""Class to define common methods to be inherited from for use in trestle."""
    # Limit on how many tokens are consumed when parsing a single tag.
max_tag_parse = 20
    def __init__(self, environment: Environment) -> None:
        """Ensure environment is set and carried into class vars."""
super().__init__(environment)
@staticmethod
def parse_expression(parser):
"""Safely parse jinja expression."""
# Licensed under MIT from:
# https://github.com/MoritzS/jinja2-django-tags/blob/master/jdj_tags/extensions.py#L424
# Due to how the jinja2 parser works, it treats "foo" "bar" as a single
# string literal as it is the case in python.
# But the url tag in django supports multiple string arguments, e.g.
# "{% url 'my_view' 'arg1' 'arg2' %}".
# That's why we have to check if it's a string literal first.
token = parser.stream.current
if token.test(lexer.TOKEN_STRING):
expr = nodes.Const(token.value, lineno=token.lineno)
next(parser.stream)
else:
expr = parser.parse_expression(False)
return expr
class MDSectionInclude(TrestleJinjaExtension):
"""Inject the parameter of the tag as the resulting content."""
tags = {'mdsection_include'}
    def __init__(self, environment: Environment) -> None:
        """Ensure environment is set and carried into class vars."""
super().__init__(environment)
def parse(self, parser):
"""Execute parsing of md token and return nodes."""
kwargs = None
expected_heading_level = None
count = 0
while parser.stream.current.type != lexer.TOKEN_BLOCK_END:
count = count + 1
if count > self.max_tag_parse:
raise err.TrestleError('Unexpected Jinja tag structure provided, please review docs.')
token = parser.stream.current
if token.test('name:mdsection_include'):
parser.stream.expect(lexer.TOKEN_NAME)
markdown_source = parser.stream.expect(lexer.TOKEN_STRING)
section_title = parser.stream.expect(lexer.TOKEN_STRING)
elif kwargs is not None:
arg = token.value
next(parser.stream)
parser.stream.expect(lexer.TOKEN_ASSIGN)
token = parser.stream.current
exp = self.parse_expression(parser)
kwargs[arg] = exp.value
else:
if parser.stream.look().type == lexer.TOKEN_ASSIGN:
kwargs = {}
continue
# Use the established environment to source the file
md_content, _, _ = self.environment.loader.get_source(self.environment, markdown_source.value)
fm = frontmatter.loads(md_content)
if not fm.metadata == {}:
logger.warning('Non zero metadata on MD section include - ignoring')
full_md = markdown_node.MarkdownNode.build_tree_from_markdown(fm.content.split('\n'))
md_section = full_md.get_node_for_key(section_title.value, strict_matching=True)
# adjust
if kwargs is not None:
expected_heading_level = kwargs.get('heading_level')
if expected_heading_level is not None:
level = md_section.get_node_header_lvl()
delta = int(expected_heading_level) - level
if not delta == 0:
md_section.change_header_level_by(delta)
if not md_section:
raise err.TrestleError(
f'Unable to retrieve section "{section_title.value}"" from {markdown_source.value} jinja template.'
)
local_parser = Parser(self.environment, md_section.content.raw_text)
top_level_output = local_parser.parse()
return top_level_output.body
class MDCleanInclude(TrestleJinjaExtension):
"""Inject the parameter of the tag as the resulting content."""
tags = {'md_clean_include'}
    def __init__(self, environment: Environment) -> None:
        """Ensure environment is set and carried into class vars."""
super().__init__(environment)
def parse(self, parser):
"""Execute parsing of md token and return nodes."""
kwargs = None
expected_heading_level = None
count = 0
while parser.stream.current.type != lexer.TOKEN_BLOCK_END:
count = count + 1
if count > self.max_tag_parse:
raise err.TrestleError('Unexpected Jinja tag structure provided, please review docs.')
token = parser.stream.current
if token.test('name:md_clean_include'):
parser.stream.expect(lexer.TOKEN_NAME)
markdown_source = parser.stream.expect(lexer.TOKEN_STRING)
elif kwargs is not None:
arg = token.value
next(parser.stream)
parser.stream.expect(lexer.TOKEN_ASSIGN)
token = parser.stream.current
exp = self.parse_expression(parser)
kwargs[arg] = exp.value
else:
if parser.stream.look().type == lexer.TOKEN_ASSIGN:
kwargs = {}
continue
md_content, _, _ = self.environment.loader.get_source(self.environment, markdown_source.value)
fm = frontmatter.loads(md_content)
content = fm.content
content += '\n\n'
if kwargs is not None:
expected_heading_level = kwargs.get('heading_level')
if expected_heading_level is not None:
content = adjust_heading_level(content, expected_heading_level)
local_parser = Parser(self.environment, content)
top_level_output = local_parser.parse()
return top_level_output.body
| 40.677778 | 118 | 0.647228 |
0969a3e71f8f97cc5d824c2b4755427e17051e9a | 499 | py | Python | core/migrations/0023_auto_20210502_1749.py | Braineanear/AL3wn | b523d25386a8b2e45487d1b9d2c78cead623c1e6 | [
"Apache-2.0"
] | null | null | null | core/migrations/0023_auto_20210502_1749.py | Braineanear/AL3wn | b523d25386a8b2e45487d1b9d2c78cead623c1e6 | [
"Apache-2.0"
] | null | null | null | core/migrations/0023_auto_20210502_1749.py | Braineanear/AL3wn | b523d25386a8b2e45487d1b9d2c78cead623c1e6 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.4 on 2021-05-02 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_auto_20210329_1751'),
]
operations = [
migrations.AlterField(
model_name='outerexam',
name='year',
field=models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3'), ('3Open', '3open')], max_length=255, verbose_name='Academic Year'),
),
]
| 26.263158 | 148 | 0.55511 |
3f624cb5f562fa25ff153249401d046947fe480c | 48 | py | Python | iam/__version__.py | uddmorningsun/iam-python-sdk | 5afe5de520632d1cd67626e0ef04264fa7a95d40 | [
"MIT"
] | 10 | 2021-03-08T09:50:12.000Z | 2022-02-09T04:08:09.000Z | iam/__version__.py | uddmorningsun/iam-python-sdk | 5afe5de520632d1cd67626e0ef04264fa7a95d40 | [
"MIT"
] | 11 | 2021-03-15T03:03:15.000Z | 2022-03-04T03:15:22.000Z | iam/__version__.py | uddmorningsun/iam-python-sdk | 5afe5de520632d1cd67626e0ef04264fa7a95d40 | [
"MIT"
] | 11 | 2021-03-08T09:19:47.000Z | 2021-11-22T02:36:25.000Z | # -*- coding: utf-8 -*-
__version__ = "1.1.20"
| 12 | 23 | 0.520833 |
9a5001bb164a555e5d3aaeb7b48e49317018016b | 5,409 | py | Python | tests/integration/moto_server.py | neuro-inc/platform-buckets-api | ba04edeb8565fa06e5af6d0316957a8816b087b2 | [
"Apache-2.0"
] | null | null | null | tests/integration/moto_server.py | neuro-inc/platform-buckets-api | ba04edeb8565fa06e5af6d0316957a8816b087b2 | [
"Apache-2.0"
] | 55 | 2021-11-16T00:26:52.000Z | 2022-03-29T03:16:55.000Z | tests/integration/moto_server.py | neuro-inc/platform-buckets-api | ba04edeb8565fa06e5af6d0316957a8816b087b2 | [
"Apache-2.0"
] | null | null | null | import asyncio
import json
import logging
import os
from collections.abc import AsyncIterator, Iterator
import aiobotocore.session
import aiohttp
import pytest
from aiobotocore.client import AioBaseClient
from async_timeout import timeout
from docker import DockerClient
from docker.errors import NotFound as ContainerNotFound
from docker.models.containers import Container
from yarl import URL
from tests.integration.conftest import MotoConfig
logger = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def moto_container_image() -> str:
return "romasku/moto-patched"
@pytest.fixture(scope="session")
def moto_container_name() -> str:
return "platform-buckets-api-moto-server"
@pytest.fixture(scope="session")
def auth_jwt_secret() -> str:
return os.environ.get("NP_JWT_SECRET", "secret")
def _create_url(container: Container, in_docker: bool) -> URL:
exposed_port = 5000
if in_docker:
host, port = container.attrs["NetworkSettings"]["IPAddress"], exposed_port
else:
host, port = "0.0.0.0", container.ports[f"{exposed_port}/tcp"][0]["HostPort"]
return URL(f"http://{host}:{port}")
@pytest.fixture(scope="session")
def _auth_url() -> URL:
return URL(os.environ.get("AUTH_URL", ""))
@pytest.fixture(scope="session")
def _moto_server(
docker_client: DockerClient,
in_docker: bool,
reuse_docker: bool,
moto_container_image: str,
moto_container_name: str,
) -> Iterator[URL]:
try:
container = docker_client.containers.get(moto_container_name)
if reuse_docker:
yield _create_url(container, in_docker)
return
else:
container.remove(force=True)
except ContainerNotFound:
pass
# `run` performs implicit `pull`
container = docker_client.containers.run(
image=moto_container_image,
name=moto_container_name,
publish_all_ports=True,
stdout=False,
stderr=False,
detach=True,
environment={"INITIAL_NO_AUTH_ACTION_COUNT": 1},
)
container.reload()
yield _create_url(container, in_docker)
if not reuse_docker:
container.remove(force=True)
async def wait_for_moto_server(
url: URL, timeout_s: float = 300, interval_s: float = 1
) -> None:
last_exc = None
try:
async with timeout(timeout_s):
while True:
try:
async with aiohttp.ClientSession() as session:
async with session.get(f"{url}/moto-api/"):
return
except (AssertionError, OSError, aiohttp.ClientError) as exc:
last_exc = exc
logger.debug(f"waiting for {url}: {last_exc}")
await asyncio.sleep(interval_s)
except asyncio.TimeoutError:
pytest.fail(f"failed to connect to {url}: {last_exc}")
@pytest.fixture()
async def moto_server(_moto_server: URL) -> AsyncIterator[MotoConfig]:
await wait_for_moto_server(_moto_server)
async with aiohttp.ClientSession() as session:
async with session.post(f"{_moto_server}/moto-api/reset"):
pass
async with session.post(f"{_moto_server}/moto-api/reset-auth", data=b"4"):
pass
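    # Create an "admin" IAM user with a full-access policy; the s3/iam/sts client fixtures below authenticate with these keys.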
boto_session = aiobotocore.session.get_session()
async with boto_session.create_client("iam", endpoint_url=str(_moto_server)) as iam:
create_user_resp = await iam.create_user(UserName="admin")
keys = (await iam.create_access_key(UserName="admin"))["AccessKey"]
policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": ["*"], "Resource": "*"}],
}
policy_arn = (
await iam.create_policy(
PolicyName="admin_policy", PolicyDocument=json.dumps(policy_document)
)
)["Policy"]["Arn"]
await iam.attach_user_policy(UserName="admin", PolicyArn=policy_arn)
yield MotoConfig(
url=_moto_server,
admin_user_arn=create_user_resp["User"]["Arn"],
admin_access_key_id=keys["AccessKeyId"],
admin_secret_access_key=keys["SecretAccessKey"],
)
@pytest.fixture()
async def s3(moto_server: MotoConfig) -> AsyncIterator[AioBaseClient]:
session = aiobotocore.session.get_session()
async with session.create_client(
"s3",
endpoint_url=str(moto_server.url),
aws_access_key_id=moto_server.admin_access_key_id,
aws_secret_access_key=moto_server.admin_secret_access_key,
) as s3_client:
yield s3_client
@pytest.fixture()
async def iam(moto_server: MotoConfig) -> AsyncIterator[AioBaseClient]:
session = aiobotocore.session.get_session()
async with session.create_client(
"iam",
endpoint_url=str(moto_server.url),
aws_access_key_id=moto_server.admin_access_key_id,
aws_secret_access_key=moto_server.admin_secret_access_key,
) as iam_client:
yield iam_client
@pytest.fixture()
async def sts(moto_server: MotoConfig) -> AsyncIterator[AioBaseClient]:
session = aiobotocore.session.get_session()
async with session.create_client(
"sts",
endpoint_url=str(moto_server.url),
aws_access_key_id=moto_server.admin_access_key_id,
aws_secret_access_key=moto_server.admin_secret_access_key,
) as iam_client:
yield iam_client
| 31.086207 | 88 | 0.672768 |
4bf2a38c5030564c0e986bc2e40f1ed9cf0a33d1 | 4,123 | py | Python | structural/composite_concept.py | EdiBoba/python_patterns | b3343eed5592beea2996316feb8df4bad107e1fc | [
"MIT"
] | 2 | 2022-02-08T16:30:22.000Z | 2022-03-16T08:20:25.000Z | structural/composite_concept.py | EdiBoba/python_patterns | b3343eed5592beea2996316feb8df4bad107e1fc | [
"MIT"
] | null | null | null | structural/composite_concept.py | EdiBoba/python_patterns | b3343eed5592beea2996316feb8df4bad107e1fc | [
"MIT"
] | 3 | 2021-08-06T15:47:47.000Z | 2021-12-09T18:59:38.000Z | from abc import ABC, abstractmethod
from typing import List
class Component(ABC):
"""
    The base Component class declares common operations for both the simple and
    the complex objects of the structure.
"""
_parent: 'Component'
@property
def parent(self) -> 'Component':
return self._parent
@parent.setter
def parent(self, parent: 'Component'):
"""
        Optionally, the base Component can declare an interface for setting and
        accessing a parent of the component in a tree structure. It can also
        provide some default implementation for these methods.
"""
self._parent = parent
def add(self, component: 'Component') -> None:
pass
def remove(self, component: 'Component') -> None:
pass
@abstractmethod
def operation(self) -> str:
"""
        The base Component may either implement some default behaviour itself or
        leave it to the concrete classes by declaring the method abstract.
"""
pass
def is_composite(self) -> bool:
"""
        You can provide a method that lets the client code figure out whether a
        component can have nested objects.
"""
return False
class Leaf(Component):
"""
    The Leaf class represents the end objects of a structure. A leaf cannot
    have any nested components.
    Usually, it is the Leaf objects that do the actual work, while Composite
    objects only delegate the work to their sub-components.
"""
def operation(self) -> str:
return "Лист"
class Composite(Component):
"""
    The Composite class represents complex components that may have nested
    components. Usually, Composite objects delegate the actual work to their
    children and then "sum up" the result.
"""
_children: List[Component]
def __init__(self) -> None:
self._children = []
"""
    A composite object can both add components to its list of nested components
    and remove them, whether they are simple or complex.
"""
def add(self, component: Component) -> None:
self._children.append(component)
component.parent = self
def remove(self, component: Component) -> None:
self._children.remove(component)
component.parent = None
def is_composite(self) -> bool:
return True
def operation(self) -> str:
"""
        The Composite executes its primary logic in a particular way: it
        traverses recursively through all of its children, collecting and
        summing up their results. Since the composite's children pass these
        calls on to their own children and so on, the whole object tree is
        traversed as a result.
"""
results = []
for child in self._children:
results.append(child.operation())
return f"Ветка({'+'.join(results)})"
def client_code(component: Component) -> None:
"""
    The client code works with all of the components through the base interface.
"""
print(f"RESULT: {component.operation()}")
def client_code2(component1: Component, component2: Component) -> None:
"""
    Because the child-management operations are declared in the base Component
    class, the client code can work with both simple and complex components
    regardless of their concrete classes.
"""
if component1.is_composite():
component1.add(component2)
print(f"RESULT: {component1.operation()}")
if __name__ == "__main__":
simple = Leaf()
print("Client: я могу получить простой компонент:")
client_code(simple)
print("\n")
tree = Composite()
branch1 = Composite()
branch1.add(Leaf())
branch1.add(Leaf())
branch2 = Composite()
branch2.add(Leaf())
tree.add(branch1)
tree.add(branch2)
print("Client: я могу получить сложный компонент:")
client_code(tree)
print("\n")
print("Client: мне не надо проверять типы классов компонентов, "
"когда я работаю с деревом:")
client_code2(tree, simple)
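    # Expected RESULT lines (a sketch, assuming the Leaf/Branch labels above):
    #   RESULT: Leaf
    #   RESULT: Branch(Branch(Leaf+Leaf)+Branch(Leaf))
    #   RESULT: Branch(Branch(Leaf+Leaf)+Branch(Leaf)+Leaf)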
| 28.832168 | 80 | 0.663109 |
4d5829da32eb9fee515d29603c37f8dcf677e0a8 | 2,779 | py | Python | setup.py | zmetcalf/django-widgy | 41f0f791bc8aaf003c10e3c41dffd68ea5c28d48 | [
"Apache-2.0"
] | null | null | null | setup.py | zmetcalf/django-widgy | 41f0f791bc8aaf003c10e3c41dffd68ea5c28d48 | [
"Apache-2.0"
] | null | null | null | setup.py | zmetcalf/django-widgy | 41f0f791bc8aaf003c10e3c41dffd68ea5c28d48 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import os
import sys
__doc__ = """
A CMS framework for Django built on a heterogenous tree editor.
"""
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
pytest.main(self.test_args)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# In install_requires and test_requires, 'foo' should come after 'foo-*' (eg
# django should come after django-treebeard). See
# <https://bitbucket.org/pypa/setuptools/issue/196/tests_require-pytest-pytest-cov-breaks>.
install_requires = [
'django-treebeard',
'django-pyscss',
'six',
'django-compressor>=1.3',
'beautifulsoup4',
'django-argonauts>=1.0.0',
'django<1.8',
]
# Markdown stops support for Python 2.6 in version 2.5
if sys.version_info < (2, 7):
install_requires.append('markdown<2.5')
else:
install_requires.append('markdown')
extras_require = {
'widgy_mezzanine': [
'mezzanine>=3.1.10',
],
'page_builder': [
'django-filer>=0.9.9',
'markdown',
'bleach',
'sorl-thumbnail>=11.12',
],
'form_builder': [
'django-extensions',
'html2text>=3.200.3',
'phonenumbers>=5',
],
}
extras_require['all'] = set(j for i in extras_require.values() for j in i)
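# The 'all' extra installs the union of every optional dependency group defined above.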
setup(
name='django-widgy',
version='0.6.0.dev0',
author='Fusionbox, Inc.',
author_email='programmers@fusionbox.com',
description=__doc__,
long_description=read('README.rst') + '\n\n' + read('CHANGELOG.rst'),
url='http://docs.wid.gy/',
packages=[package for package in find_packages() if package.startswith('widgy')],
install_requires=install_requires,
extras_require=extras_require,
zip_safe=False,
include_package_data=True,
tests_require=['mock', 'dj-database-url', 'pytest-django', 'pytest'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Natural Language :: English',
'Programming Language :: JavaScript',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| 28.357143 | 91 | 0.639079 |
7167f93758bbf1998011f4f3c2effb389f694a80 | 1,411 | py | Python | examples/copy_and_paste.py | MrTeferi/photoshop-python-api | 3467c345c836e0515af793d365edb83fe9166643 | [
"MIT"
] | 270 | 2020-04-11T22:41:20.000Z | 2022-03-29T09:16:43.000Z | examples/copy_and_paste.py | MrTeferi/photoshop-python-api | 3467c345c836e0515af793d365edb83fe9166643 | [
"MIT"
] | 67 | 2020-04-18T08:12:34.000Z | 2022-03-31T16:51:56.000Z | examples/copy_and_paste.py | MrTeferi/photoshop-python-api | 3467c345c836e0515af793d365edb83fe9166643 | [
"MIT"
] | 32 | 2020-05-13T10:51:04.000Z | 2022-03-19T02:53:43.000Z | """
References:
https://github.com/lohriialo/photoshop-scripting-python/blob/master/CopyAndPaste.py
"""
# Import local modules
import photoshop.api as ps
app = ps.Application()
startRulerUnits = app.preferences.rulerUnits
app.preferences.rulerUnits = ps.Units.Inches
doc = app.documents.add(
7, 5, 72, None, ps.NewDocumentMode.NewRGB, ps.DocumentFill.White
)
# Make sure the active layer is not a text layer, which cannot be copied to the
# clipboard.
if doc.activeLayer.kind != ps.LayerKind.TextLayer:
# Select the left half of the document. Selections are always expressed
# in pixels regardless of the current ruler unit type, so we're computing
# the selection corner points based on the inch unit width and height
# of the document
x2 = (doc.width * doc.resolution) / 2
y2 = doc.height * doc.resolution
sel_area = ((0, 0), (x2, 0), (x2, y2), (0, y2))
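    # The four selection corners (top-left, top-right, bottom-right, bottom-left) in pixel coordinates.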
doc.selection.select(sel_area, ps.SelectionType.ReplaceSelection, 0, False)
doc.selection.copy()
# The new doc is created
# need to change ruler units to pixels because x2 and y2 are pixel units.
app.preferences.rulerUnits = ps.Units.Pixels
pasteDoc = app.documents.add(x2, y2, doc.resolution, "Paste Target")
pasteDoc.paste()
else:
print("You cannot copy from a text layer")
if startRulerUnits != app.preferences.rulerUnits:
app.preferences.rulerUnits = startRulerUnits
| 30.673913 | 87 | 0.719348 |
a5fb3741b081bc081ab240abcd2d062959e17059 | 1,660 | py | Python | main.py | pders01/check-library-on-protondb | ea85bb03f815b7b9c5c327769bcae573e695e3c9 | [
"MIT"
] | null | null | null | main.py | pders01/check-library-on-protondb | ea85bb03f815b7b9c5c327769bcae573e695e3c9 | [
"MIT"
] | null | null | null | main.py | pders01/check-library-on-protondb | ea85bb03f815b7b9c5c327769bcae573e695e3c9 | [
"MIT"
] | null | null | null | import argparse
from operator import contains
from os import listdir
from os.path import isfile, join
from pprint import pprint
import requests
import json
import uuid
BASE_URL = 'https://protondb.max-p.me'
def parse_args():
parser = argparse.ArgumentParser(description='Supply full paths to your game libraries. ')
parser.add_argument('path')
args = parser.parse_args()
return args.path
def check_path_for_gamedirs(path):
games = [d for d in listdir(path)]
filtered_games = [game for game in games if not "." in game]
return filtered_games
def query_protondb_api(url):
response = requests.get(url)
return response.json()
def filter_api_results(games, api_data):
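    # Match local library folder names against the ProtonDB title list and keep the appId/title entries found locally.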
api_data_key_val_tuples = [entry.items() for entry in api_data]
game_metadata = {}
result = []
for appId, title in api_data_key_val_tuples:
random_id = str(uuid.uuid4())[:8]
game_metadata[random_id] = {'appId': appId[1], 'title': title[1]}
for game in games:
for id, data in game_metadata.items():
if game == data['title']:
result.append(game_metadata[id])
return result
def get_reports_from_found_games(found_games):
    # Collect the reports for every matched game rather than returning after a single request.
    reports = []
    for found_game in found_games:
        response = requests.get(f"{BASE_URL}/games/{found_game['appId']}/reports/")
        reports.append(response.json())
    return reports
def main():
path = parse_args()
games = check_path_for_gamedirs(path)
api_data = query_protondb_api(BASE_URL + '/games')
found_games = filter_api_results(games, api_data)
reports = get_reports_from_found_games(found_games)
pprint(reports)
if __name__ == "__main__":
main() | 28.62069 | 94 | 0.693373 |
b77ccd38ec4f0fffd983acfe05258b58d1951f88 | 1,740 | py | Python | step2/asprep/as_2a.py | funderburkjim/boesp-prep | a8738671a8f8760feb945f4932bf4f2d19ed4f86 | [
"MIT"
] | null | null | null | step2/asprep/as_2a.py | funderburkjim/boesp-prep | a8738671a8f8760feb945f4932bf4f2d19ed4f86 | [
"MIT"
] | 50 | 2021-08-28T23:02:22.000Z | 2022-01-18T18:31:21.000Z | step2/asprep/as_2a.py | funderburkjim/boesp-prep | a8738671a8f8760feb945f4932bf4f2d19ed4f86 | [
"MIT"
] | 1 | 2021-09-02T04:36:26.000Z | 2021-09-02T04:36:26.000Z | # coding=utf-8
""" as_2.py
"""
from __future__ import print_function
import sys, re,codecs
from pwgbib_altera import read_pwgbib
class AS(object):
def __init__(self,line):
self.line = line.rstrip('\r\n')
self.code,self.count = self.line.split(r' : ')
self.pwgrec = None
def read_as_1(filein):
with codecs.open(filein,"r","utf-8") as f:
recs = [AS(x) for x in f]
return recs
def match_as_pwg(asrecs,pwgrecs):
d = {}
for pwgrec in pwgrecs:
key = pwgrec.pwgcode1
if key in d:
oldrec = d[key]
oldcode = oldrec.pwgcode
ident = pwgrec.ident
oldident = oldrec.ident
print('duplicate pwgkey',key,pwgrec.pwgcode,ident,oldcode,oldident)
d[key] = pwgrec
#
for asrec in asrecs:
key = asrec.code
if key in d:
asrec.pwgrec = d[key]
def write(asrecs,fileout):
recs = asrecs
with codecs.open(fileout,"w","utf-8") as f:
nfound = 0
ncount1 = 0 # count of 'found'
ncount2 = 0 # count of 'not found'
for rec in recs:
key = rec.code
count = rec.count
if rec.pwgrec:
text = rec.pwgrec.tooltip
nfound = nfound + 1
ncount1 = ncount1 + int(count)
else:
text = 'NOTPWG'
ncount2 = ncount2 + int(count)
out = '%s : %s : %s' %(key,count,text)
f.write(out+'\n')
print(len(recs),"records written to",fileout)
print(nfound,'records found in PWG')
print(ncount1,'total count for abbreviations found in PWG')
print(ncount2,'total count for abbreviations not found in PWG')
if __name__=="__main__":
#test()
filein = sys.argv[1] # as_1.txt
filein1 = sys.argv[2] # pwgbib.txt
fileout = sys.argv[3] # as_2.txt
pwgrecs = read_pwgbib(filein1)
asrecs = read_as_1(filein)
# now match, adding field to AS objects
match_as_pwg(asrecs,pwgrecs)
write(asrecs,fileout)
| 24.507042 | 70 | 0.67069 |
99d1492a50ab8306e37f3871cd3ce1f7fdefe338 | 2,794 | py | Python | datapack/data/scripts/quests/638_SeekersOfTheHolyGrail/__init__.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | [
"Unlicense"
] | null | null | null | datapack/data/scripts/quests/638_SeekersOfTheHolyGrail/__init__.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | [
"Unlicense"
] | null | null | null | datapack/data/scripts/quests/638_SeekersOfTheHolyGrail/__init__.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | [
"Unlicense"
] | null | null | null | import sys
from com.l2jfrozen import Config
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "638_SeekersOfTheHolyGrail"
DROP_CHANCE = 30
#NPC
INNOCENTIN = 31328
#MOBS
MOBS = range(22138,22175)
#ITEM
TOTEM = 8068
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [TOTEM]
def onAdvEvent (self,event,npc, player) :
htmltext = event
st = player.getQuestState(qn)
if not st : return
if event == "31328-02.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "31328-06.htm" :
st.playSound("ItemSound.quest_finish")
st.exitQuest(1)
return htmltext
def onTalk (self, npc, player) :
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
level = player.getLevel()
id = st.getState()
if level >= 73 :
if id == CREATED :
htmltext = "31328-01.htm"
elif id == STARTED and st.getQuestItemsCount(TOTEM) >= 2000 :
rr = st.getRandom(3)
if rr == 0 :
st.takeItems(TOTEM,2000)
st.giveItems(959,st.getRandom(4)+3)
st.playSound("ItemSound.quest_middle")
if rr == 1 :
st.takeItems(TOTEM,2000)
st.giveItems(57,3576000)
st.playSound("ItemSound.quest_middle")
if rr == 2 :
st.takeItems(TOTEM,2000)
st.giveItems(960,st.getRandom(4)+3)
st.playSound("ItemSound.quest_middle")
htmltext = "31328-03.htm"
else :
htmltext = "31328-04.htm"
else :
htmltext = "31328-00.htm"
st.exitQuest(1)
return htmltext
def onKill(self, npc, player, isPet) :
partyMember = self.getRandomPartyMember(player,"1")
if not partyMember: return
st = partyMember.getQuestState(qn)
if st :
count = st.getQuestItemsCount(TOTEM)
if st.getInt("cond") == 1 :
chance = DROP_CHANCE * Config.RATE_DROP_QUEST
numItems, chance = divmod(chance,100)
if st.getRandom(100) < chance :
numItems += 1
if numItems :
st.playSound("ItemSound.quest_itemget")
st.giveItems(TOTEM,int(numItems))
return
QUEST = Quest(638,qn,"Seekers of the Holy Grail")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(INNOCENTIN)
QUEST.addTalkId(INNOCENTIN)
for i in MOBS :
QUEST.addKillId(i) | 28.510204 | 153 | 0.639227 |
00882248e3b032c3cf10de3eec40c882e8ae846a | 238 | py | Python | src/pyrepo/__init__.py | energyinpython/pre-pyrepo | 92e44594e12d1110247f011e51734e5ce1fe0b8e | [
"MIT"
] | null | null | null | src/pyrepo/__init__.py | energyinpython/pre-pyrepo | 92e44594e12d1110247f011e51734e5ce1fe0b8e | [
"MIT"
] | null | null | null | src/pyrepo/__init__.py | energyinpython/pre-pyrepo | 92e44594e12d1110247f011e51734e5ce1fe0b8e | [
"MIT"
] | null | null | null | from . import mcda_methods
from . import additions
from . import compromise_rankings
from . import correlations
from . import distance_metrics
from . import normalizations
from . import sensitivity_analysis
from . import weighting_methods | 29.75 | 34 | 0.836134 |
516a257a9c513b976e6e8b392b61507a3c65a068 | 4,472 | py | Python | plotters/bar_timeline_plotter.py | msingam/covid-chicago | 761abfec2ef40bfb7084009d456ad6dafe3b24f4 | [
"Apache-2.0"
] | null | null | null | plotters/bar_timeline_plotter.py | msingam/covid-chicago | 761abfec2ef40bfb7084009d456ad6dafe3b24f4 | [
"Apache-2.0"
] | null | null | null | plotters/bar_timeline_plotter.py | msingam/covid-chicago | 761abfec2ef40bfb7084009d456ad6dafe3b24f4 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import seaborn as sns
#from plotting.colors import load_color_palette
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-e",
"--exp_names",
type=str,
nargs='+',
help="Experiment names to compare, example python data_comparison_spatial_2.py -e exp_name1 exp_name2"
)
parser.add_argument(
"-l",
"--labels",
type=str,
nargs='+',
help="Experiment labels, if not specified will be extracted from exp_names"
)
parser.add_argument(
"-ch",
"--channel",
type=str,
default = 'deaths',
help="Outcome channel to plot"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default = "Local"
)
return parser.parse_args()
def cumulative_barplot(exp_names,channel,labels, first_day,last_day, region="All"):
fig = plt.figure(figsize=(6, 4))
fig.subplots_adjust(left=0.2)
ax = fig.gca()
#p = load_color_palette('wes')
#palette = [p[x] for x in [8, 4, 2, 1, 3]]
palette = ('#913058', "#F6851F", "#00A08A", "#D61B5A", "#5393C3", "#F1A31F", "#98B548", "#8971B3", "#969696")
first_md = first_day.strftime('%b %d')
last_md = last_day.strftime('%b %d')
for s, exp_name in enumerate(exp_names):
simpath = os.path.join(projectpath, 'cms_sim', 'simulation_output', exp_name)
exp_date = exp_name.split("_")[0]
fname = f'nu_{exp_date}_{region}.csv'
df = pd.read_csv(os.path.join(simpath, fname))
df['date'] = pd.to_datetime(df['date'])
df = df[df['date'].between(pd.Timestamp(first_day), pd.Timestamp(last_day))]
ax.bar([s], np.sum(df['%s_median' % channel]), align='center', color=palette[s], label=labels[s])
ax.plot([s, s], [np.sum(df['%s_lower' % channel]), np.sum(df['%s_upper' % channel])], color='k', linewidth=0.5)
ax.legend()
ax.set_ylabel(f'cumulative {channel} {first_md} - {last_md}')
plt.savefig(os.path.join(plot_path, f'{channel}_barplot.png'))
plt.savefig(os.path.join(plot_path,"pdf", f'{channel}_barplot.pdf'), format='PDF')
#plt.show()
def timeline_plot(exp_names,channel,labels, first_day,last_day, region="All"):
fig = plt.figure(figsize=(6, 4))
fig.subplots_adjust(left=0.2)
ax = fig.gca()
#p = load_color_palette('wes')
#palette = [p[x] for x in [8, 4, 2, 1, 3]]
palette = ('#913058', "#F6851F", "#00A08A", "#D61B5A", "#5393C3", "#F1A31F", "#98B548", "#8971B3", "#969696")
for s, exp_name in enumerate(exp_names):
simpath = os.path.join(projectpath, 'cms_sim', 'simulation_output', exp_name)
exp_date = exp_name.split("_")[0]
fname = f'nu_{exp_date}_{region}.csv'
df = pd.read_csv(os.path.join(simpath, fname))
df['date'] = pd.to_datetime(df['date'])
df = df[df['date'].between(pd.Timestamp(first_day), pd.Timestamp(last_day))]
ax.plot(df['date'], df['%s_median' % channel], color=palette[s], label=labels[s])
ax.fill_between(df['date'], df['%s_lower' % channel], df['%s_upper' % channel], color=palette[s], linewidth=0, alpha=0.4)
ax.legend()
ax.set_ylabel(f'{channel}')
plt.savefig(os.path.join(plot_path, f'{channel}_timelineplot.png'))
plt.savefig(os.path.join(plot_path,"pdf", f'{channel}_timelineplot.pdf'), format='PDF')
#plt.show()
if __name__ == '__main__':
args = parse_args()
exp_names = args.exp_names
labels = args.labels
channel = args.channel
Location = args.Location
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
if labels == None:
labels = [''.join(exp.split("_")[-3:]) for exp in exp_names]
plot_path = os.path.join(wdir, 'simulation_output', exp_names[len(exp_names) - 1], '_plots')
first_day = pd.Timestamp('2020-02-01')
last_day = pd.Timestamp('2021-06-01')
cumulative_barplot(exp_names,channel,labels, first_day,last_day, region="All")
timeline_plot(exp_names,channel,labels, first_day,last_day, region="All")
| 34.9375 | 129 | 0.633945 |
a9c8d74fb9e7a10fc0dfef47cfb3ac7c7187bde9 | 190 | py | Python | TkinterWithMatplotlib-dev/je_matplotlib_wrapper/save_as_png/save_as_png.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | TkinterWithMatplotlib-dev/je_matplotlib_wrapper/save_as_png/save_as_png.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | TkinterWithMatplotlib-dev/je_matplotlib_wrapper/save_as_png/save_as_png.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | from matplotlib import pyplot
def save_as_png(filename: str, what_plot_you_want_to_save: pyplot, **kwargs):
what_plot_you_want_to_save.savefig(filename, bbox_inches='tight', **kwargs)
| 31.666667 | 79 | 0.8 |
b4600257be196a4736f5f1c602134c2ed2448b5d | 3,188 | py | Python | library/nsxt_compute_collection_transport_templates_facts.py | lcamarda/nsxtlivefire-v2-siteb | 7d0dcba24fb57728230f9f04494fad63f8054d10 | [
"BSD-2-Clause"
] | 6 | 2020-03-25T16:49:52.000Z | 2020-04-11T16:01:35.000Z | library/nsxt_compute_collection_transport_templates_facts.py | khyoon/nsxtlivefire-v2-siteb | 1a21367dae2bee66ec96402a89fb82ab3f6f58ea | [
"BSD-2-Clause"
] | 3 | 2020-03-26T19:30:15.000Z | 2020-04-16T22:17:24.000Z | library/nsxt_compute_collection_transport_templates_facts.py | khyoon/nsxtlivefire-v2-siteb | 1a21367dae2bee66ec96402a89fb82ab3f6f58ea | [
"BSD-2-Clause"
] | 3 | 2019-07-24T02:03:52.000Z | 2021-06-15T22:19:51.000Z | #!/usr/bin/env python
#
# Copyright 2018 VMware, Inc.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_compute_collection_transport_templates_facts
short_description: 'List compute collection transportnode templates'
description: 'Returns all eligible compute collection transportnode templates'
version_added: '2.7'
author: 'Rahul Raghuvanshi'
options:
hostname:
description: 'Deployed NSX manager hostname.'
required: true
type: str
username:
description: 'The username to authenticate with the NSX manager.'
required: true
type: str
password:
description: 'The password to authenticate with the NSX manager.'
required: true
type: str
'''
EXAMPLES = '''
- name: List compute collection transport template
nsxt_fabric_compute_collection_transport_node_templates_facts:
hostname: "10.192.167.137"
username: "admin"
password: "Admin!23Admin"
validate_certs: False
'''
RETURN = '''# '''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible.module_utils.urls import open_url, fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError
def main():
argument_spec = vmware_argument_spec()
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
mgr_hostname = module.params['hostname']
mgr_username = module.params['username']
mgr_password = module.params['password']
validate_certs = module.params['validate_certs']
manager_url = 'https://{}/api/v1'.format(mgr_hostname)
changed = False
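    # Fetch all eligible compute collection transport node templates from the NSX manager and return them as facts.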
try:
(rc, resp) = request(manager_url+ '/compute-collection-transport-node-templates', headers=dict(Accept='application/json'),
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
    module.fail_json(msg='Error accessing compute collection fabric templates. Error [%s]' % (to_native(err)))
module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
main()
| 37.952381 | 136 | 0.741217 |
134cb260436ed3ea534264fec0b449c9181694b8 | 87,257 | py | Python | tests/python/unittest/test_sparse_operator.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 22 | 2019-02-20T12:42:20.000Z | 2021-12-25T06:09:46.000Z | tests/python/unittest/test_sparse_operator.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 4 | 2019-04-01T07:36:04.000Z | 2022-03-24T03:11:26.000Z | tests/python/unittest/test_sparse_operator.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 7 | 2019-03-20T16:04:37.000Z | 2021-04-28T18:40:11.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from mxnet.test_utils import *
import random
import warnings
def is_scalar(var):
return False if hasattr(var, "__len__") else True
def get_result_type(call, dflt_stype):
"""Try to infer result storage type for a sparse matrix and a given unary operation"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = do_normalize(call(zero))
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
else:
if dflt_stype is not None:
expected_result_type = dflt_stype;
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_with_scalar(call, dflt_stype):
"""Try to infer result storage type when operating a sparse matrices and a scalar"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = call(zero, 5)
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
else:
if dflt_stype is not None:
expected_result_type = dflt_stype;
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_2(call, dflt_stype):
"""Try to infer result storage type when operating on two sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for outer in [zero, np.ones(zero.shape)]:
for inner in [zero, np.ones(zero.shape)]:
result = do_normalize(call(outer, inner))
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_3(call, dflt_stype):
"""Try to infer result storage type when operating on three sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for moon in [zero]:
for outer in [zero]:
for inner in [zero]:
res_1, res_2 = call(moon, outer, inner)
result = do_normalize(res_1)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
result = do_normalize(res_2)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_fw_bw_result_types(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_2(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type_2(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_with_scalar(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type_with_scalar(forward_numpy_call, fwd_res_dflt),
get_result_type_with_scalar(backward_numpy_call, bwd_res_dflt))
def gen_rsp_random_indices(shape, density=.5, force_indices=None):
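    # Keep each row index with probability 'density' (never more than shape[0] rows); any force_indices are always included.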
assert density >= 0 and density <= 1
indices = set()
if force_indices is not None:
for val in force_indices:
indices.add(int(val))
if not np.isclose(density, .0, rtol=1.e-3, atol=1.e-3, equal_nan=True) and len(shape) > 0:
row_count = shape[0]
for i in range(row_count):
r = random.uniform(0, 1)
if r <= density and len(indices) < shape[0]:
indices.add(i)
assert len(indices) <= shape[0]
return list(indices)
def all_zero(var):
return 0
def test_elemwise_binary_ops():
def test_elemwise_binary_op(name, lhs_stype, rhs_stype, shape,
forward_mxnet_call, forward_numpy_call, backward_numpy_call,
lhs_grad_stype,
rhs_grad_stype,
expected_result_storage_type=None,
modifier_func=None,
lhs_density=.5,
rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0,
skip_gradient_check=False,
shuffle_csr_indices=True,
verbose=False):
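        # Build lhs/rhs arrays with the requested storage types and densities, verify the forward output's stype and values
        # against numpy, then verify the backward gradients (plus an optional numeric gradient check).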
if lhs_grad_stype is None:
lhs_grad_stype = lhs_stype
if rhs_grad_stype is None:
rhs_grad_stype = rhs_stype
lhs_grad_stype = get_result_type_3(backward_numpy_call, lhs_grad_stype)
rhs_grad_stype = get_result_type_3(backward_numpy_call, rhs_grad_stype)
if verbose is True:
print("testing: {} lhs={}, rhs={}, lhs_grad_stype={}, rhs_grad_stype={}"
.format(name, lhs_stype, rhs_stype, lhs_grad_stype, rhs_grad_stype))
# Output type should be same as lvalue type, unless otherwise specified
if expected_result_storage_type is None:
if lhs_stype == 'default' or rhs_stype == 'default':
expected_result_storage_type = 'default'
else:
expected_result_storage_type = lhs_stype
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
grad_stypes = dict()
grad_stypes['lhs'] = lhs_grad_stype
grad_stypes['rhs'] = rhs_grad_stype
if lhs_stype == 'default':
lhs_nd = rand_ndarray(shape, 'default')
if abs(lhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
lhs_nd = mx.nd.array(assign_each(lhs_nd.asnumpy(), func))
else:
lhs_nd = create_sparse_array_zd(
shape, lhs_stype, density=lhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=lhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
if rhs_stype == 'default':
rhs_nd = rand_ndarray(shape, 'default')
if abs(rhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
rhs_nd = mx.nd.array(assign_each(rhs_nd.asnumpy(), func))
else:
rhs_nd = create_sparse_array_zd(
shape, rhs_stype, density=rhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=rhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
if verbose is True:
print("lhs input: {}".format(lhs_np))
print("rhs input: {}".format(rhs_np))
out_np = forward_numpy_call(lhs_np, rhs_np)
if verbose is True:
print("out_np: {}".format(out_np))
test = forward_mxnet_call(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
outputs = check_symbolic_forward(test, location, [out_np], equal_nan=True)
assert len(outputs) == 1
assert outputs[0].stype == expected_result_storage_type
if verbose is True:
print ("mx forward output: ", outputs[0].asnumpy())
print ("lhs_nd: ", lhs_nd.stype)
print ("rhs_nd: ", rhs_nd.stype)
print ("forward output: ", outputs[0].stype)
if outputs[0].stype != 'default':
out_grad = create_sparse_array_zd(
shape, outputs[0].stype, density=ograd_density,
data_init=1,
modifier_func=lambda x: 2,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_grad_overlap is True else None
))
else:
if abs(ograd_density) < 1e-4:
out_grad = mx.nd.array(np.zeros(shape))
else:
out_grad = mx.nd.array(np.ones(shape))
out_grad_np = out_grad.asnumpy()
if verbose is True:
print("out_grad_np", out_grad_np)
ingrad_lhs_np, ingrad_rhs_np = backward_numpy_call(out_grad_np, lhs_np, rhs_np)
if verbose is True:
print("out_grad", out_grad.asnumpy())
print("ingrad_lhs_np", ingrad_lhs_np)
print("ingrad_rhs_np", ingrad_rhs_np)
igrads_result = check_symbolic_backward(test, location, [out_grad],
[ingrad_lhs_np, ingrad_rhs_np],
grad_stypes=grad_stypes,
equal_nan=True)
if verbose is True:
print("ingrad_lhs", igrads_result['lhs'].asnumpy())
print("ingrad_rhs", igrads_result['rhs'].asnumpy())
assert len(igrads_result) == 2
if lhs_grad_stype is not None:
assert igrads_result['lhs'].stype == lhs_grad_stype
if rhs_grad_stype is not None:
assert igrads_result['rhs'].stype == rhs_grad_stype
if skip_gradient_check is not True:
check_numeric_gradient(test, location,
grad_stype_dict=grad_stypes)
def check_all(l, r, check_function):
assert l.shape == r.shape
return check_function(l, r)
def gt(l, r):
return check_all(l, r, lambda a, b: a > b)
def ge(l, r):
return check_all(l, r, lambda a, b: a >= b)
def lt(l, r):
return check_all(l, r, lambda a, b: a < b)
def le(l, r):
return check_all(l, r, lambda a, b: a <= b)
def least_sparse(lstype, rstype):
if lstype == 'default' and rstype == 'default':
return 'default'
elif rstype != 'default':
return rstype
return lstype
def most_dense(lstype, rstype):
if lstype == rstype:
return lstype
return 'default'
def check_elemwise_binary_ops(lhs_stype, rhs_stype, shape,
lhs_grad_stype=None, rhs_grad_stype=None,
lhs_density=.5, rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density,
rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_mul", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_mul(l, r),
lambda l, r: l * r,
lambda outg, l, r: (outg * r, outg * l),
least_sparse(lhs_stype, rhs_stype),
least_sparse(lhs_stype, rhs_stype),
expected_result_storage_type=most_dense(lhs_stype, rhs_stype),
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_div", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_div(l, r),
lambda l, r: l / r,
lambda outg, l, r: (outg * (1/r), outg * (-l/(r*r))),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
expected_result_storage_type='default',
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("maximum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._maximum(l, r),
lambda l, r: np.maximum(l, r),
lambda outg, l, r: (outg * ge(l, r), outg * lt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
skip_gradient_check=True,
ograd_density=ograd_density,
verbose=False)
test_elemwise_binary_op("minimum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._minimum(l, r),
lambda l, r: np.minimum(l, r),
lambda outg, l, r: (outg * le(l, r), outg * gt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("hypot", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._hypot(l, r),
lambda l, r: np.hypot(l, r),
lambda outg, l, r: (
outg * assign_each2(
l, r, lambda a, b: a/np.sqrt(a * a + b * b)),
outg * assign_each2(
l, r, lambda a, b: b/np.sqrt(a * a + b * b))
),
lhs_grad_stype, rhs_grad_stype,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
# Run basic tests
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for ii in range(1):
# Run defaults
check_elemwise_binary_ops('default', 'default', rand_shape_2d())
# Try different densities
for lhs_density in [0.0, random.uniform(0, 1), 1.0]:
for rhs_density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
shape = rand_shape_2d()
print("lhs_density={}, rhs_density={}, ograd_density={}, shape: {}".format(
lhs_density, rhs_density, ograd_density, shape))
# Try row_sparse overlaps
for force_lr_overlap in [False, True]:
for force_grad_overlap in [False, True]:
shape = rand_shape_2d()
print(" force_lr_overlap={}, force_grad_overlap={}, shape={}".
format(force_lr_overlap, force_grad_overlap, shape))
# Left and right always overlap when one is default storage
# (assuming the row_sparse one has some entries in it)
if force_lr_overlap is False:
check_elemwise_binary_ops('default', 'row_sparse', shape,
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
check_elemwise_binary_ops('row_sparse', 'default', shape,
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
# Back to left-right overlap possiblities
check_elemwise_binary_ops('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse',
rhs_grad_stype='row_sparse',
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
# No overlap flags for CSR
check_elemwise_binary_ops('csr', 'csr', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('csr', 'csr', shape,
lhs_grad_stype='default',
rhs_grad_stype='default',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('default', 'csr', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('csr', 'default', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
def test_elemwise_csr_same_zeros():
# Zeroes
a = mx.nd.sparse.zeros('csr', (1,1))
b = mx.nd.elemwise_add(a,a)
res = a.asnumpy() + a.asnumpy()
assert_almost_equal(b.asnumpy(), res)
def as_dense(arr):
if arr.stype != 'default':
return mx.nd.cast_storage(arr, stype='default')
else:
return arr;
# Make sure that 0's look like 0's when we do a comparison
def do_normalize(arr):
ret = arr.copy()
idx = np.isclose(arr, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True)
ret[idx] = 0
return ret
def check_sparse_mathematical_core(name, stype,
forward_mxnet_call, forward_numpy_call, backward_numpy_call=None,
rhs_arg=None, data_init=9., grad_init=2., output_grad_stype=None,
input_grad_stype=None, force_overlap=False, density=.5,
ograd_density=.5, verbose=False, shuffle_csr_indices=True):
if verbose is True:
print("TESTING: " + name)
data = mx.symbol.Variable('data', stype=stype)
temp_input_grad_stype = input_grad_stype
if temp_input_grad_stype is None:
temp_input_grad_stype = stype
if rhs_arg is not None:
if is_scalar(rhs_arg):
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_with_scalar(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
if input_grad_stype is not None and input_grad_stype != expected_grad_result_type:
print("{}: explicit override of deduced input grad type '{}' with '{}'".format(
name, expected_grad_result_type, input_grad_stype))
expected_grad_result_type = input_grad_stype
shape = rand_shape_2d()
if verbose is True:
print("Shape: ", shape, "density: ", density, "force_overlap", force_overlap)
if stype == 'default':
data_tmp = np.zeros(shape)
if abs(density) >= 1e-4:
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
else:
arr_data = create_sparse_array_zd(
shape, stype, density=density,
data_init=data_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
data_tmp = arr_data.asnumpy()
if verbose is True:
print("arr_data indices", arr_data.indices.asnumpy())
if verbose is True:
print("input", data_tmp)
if backward_numpy_call is None:
arr_grad = None
elif expected_grad_result_type == 'default':
if abs(density) < 1e-4:
arr_grad = mx.nd.zeros(shape)
else:
arr_grad = mx.nd.ones(shape)
else:
arr_grad = create_sparse_array_zd(
shape,
expected_grad_result_type,
density=density,
data_init=1,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
if rhs_arg is not None:
test = forward_mxnet_call(data, rhs_arg)
else:
test = forward_mxnet_call(data)
args = list()
args.append(arr_data)
if arr_grad is not None:
exe_test = test.bind(default_context(), args=args, args_grad=[arr_grad])
else:
exe_test = test.bind(default_context(), args=args)
exe_test.forward(is_train=True)
assert exe_test.outputs[0].stype == expected_result_type
out = exe_test.outputs[0].asnumpy()
if rhs_arg is not None:
npout = forward_numpy_call(data_tmp, rhs_arg)
else:
npout = forward_numpy_call(data_tmp)
if verbose is True:
print("out", out)
print("npout", npout)
assert_almost_equal(out, npout, equal_nan=True)
if backward_numpy_call is not None:
if output_grad_stype == 'default' or output_grad_stype is None:
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
else:
out_grad = create_sparse_array_zd(
shape, output_grad_stype,
density=density,
data_init=grad_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_overlap is True else None))
npout_grad = out_grad.asnumpy()
if verbose is True:
print("npout_grad", npout_grad)
if rhs_arg is not None:
temp = backward_numpy_call(data_tmp, rhs_arg)
else:
temp = backward_numpy_call(data_tmp)
input_grad = npout_grad * temp
if verbose is True:
print(arr_grad.asnumpy())
exe_test.backward(out_grad)
if verbose is True:
print(arr_grad.asnumpy())
assert arr_grad.stype == expected_grad_result_type
arr_grad = arr_grad.asnumpy()
if verbose is True:
print(name)
print("arr_grad", arr_grad)
print("input_grad", input_grad)
assert_almost_equal(arr_grad, input_grad, equal_nan=True)
def test_sparse_mathematical_core():
def util_sign(a):
if np.isclose(a, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif np.isclose(a, 0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif a < 0.0:
return -1
else: # a > 0.0:
return 1
# Check scalar binary operators
def check_binary_op_with_scalar(stype,
output_grad_stype=None,
input_grad_stype=None,
density=.5, ograd_density=.5,
force_overlap=False,):
# mul_scalar
check_sparse_mathematical_core("mul_scalar", stype,
lambda x, y: x * y,
lambda x, y: x * y,
lambda input, rhs: rhs,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# plus_scalar
check_sparse_mathematical_core("plus_scalar", stype,
lambda x, y: x + y,
lambda x, y: x + y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# minus_scalar
check_sparse_mathematical_core("minus_scalar", stype,
lambda x, y: x - y,
lambda x, y: x - y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# Check many basic unary operators
def check_mathematical_core(stype, output_grad_stype=None,
input_grad_stype=None, force_overlap=False,
density=.5, ograd_density=.5):
# negative
check_sparse_mathematical_core("negative", stype,
lambda x: mx.sym.sparse.negative(x),
lambda x: np.negative(x),
force_overlap=force_overlap,
density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# square
check_sparse_mathematical_core("square", stype,
lambda x: mx.sym.sparse.square(x),
lambda x: np.square(x),
lambda x: 2 * x,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
if stype != "csr":
# sqrt
check_sparse_mathematical_core("sqrt", stype,
lambda x: mx.sym.sparse.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 1.0/(2.0 * np.sqrt(x)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# rsqrt
check_sparse_mathematical_core("rsqrt", stype,
lambda x: mx.sym.sparse.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tan
check_sparse_mathematical_core("tan", stype,
lambda x: mx.sym.sparse.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density,
ograd_density=ograd_density)
# abs
check_sparse_mathematical_core("abs", stype,
lambda x: mx.sym.sparse.abs(x),
lambda x: np.abs(x),
lambda x: assign_each(x, function=util_sign),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# floor
check_sparse_mathematical_core("floor", stype, lambda x: mx.sym.sparse.floor(x),
lambda x: np.floor(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# ceil
check_sparse_mathematical_core("ceil", stype,
lambda x: mx.sym.sparse.ceil(x),
lambda x: np.ceil(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# sign
check_sparse_mathematical_core("sign", stype,
lambda x: mx.sym.sparse.sign(x),
lambda x: np.sign(x),
lambda x: np.zeros(x.shape),
output_grad_stype=output_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# cos
check_sparse_mathematical_core("cos", stype,
lambda x: mx.sym.sparse.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# sin
check_sparse_mathematical_core("sin", stype,
lambda x: mx.sym.sparse.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arcsin
check_sparse_mathematical_core("arcsin", stype,
lambda x: mx.sym.sparse.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arccos
check_sparse_mathematical_core("arccos", stype,
lambda x: mx.sym.sparse.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arctan
check_sparse_mathematical_core("arctan", stype,
lambda x: mx.sym.sparse.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# degrees
check_sparse_mathematical_core("degrees", stype,
lambda x: mx.sym.sparse.degrees(x),
lambda x: np.degrees(x),
lambda x: assign_each(x, lambda a: 180./np.pi),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# radians
check_sparse_mathematical_core("radians", stype,
lambda x: mx.sym.sparse.radians(x),
lambda x: np.radians(x),
lambda x: assign_each(x, lambda a: np.pi / 180.),
data_init=0.6, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# sinh
check_sparse_mathematical_core("sinh", stype,
lambda x: mx.sym.sparse.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# cosh
check_sparse_mathematical_core("cosh", stype,
lambda x: mx.sym.sparse.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
data_init=5, grad_init=5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tanh
check_sparse_mathematical_core("tanh", stype,
lambda x: mx.sym.sparse.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
data_init=0.5, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arcsinh
check_sparse_mathematical_core("arcsinh", stype,
lambda x: mx.sym.sparse.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arccosh
check_sparse_mathematical_core("arccosh", stype,
lambda x: mx.sym.sparse.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arctanh
check_sparse_mathematical_core("arctanh", stype,
lambda x: mx.sym.sparse.arctanh(x),
lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.),
data_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log1p
check_sparse_mathematical_core("log1p", stype,
lambda x: mx.sym.sparse.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# expm1
check_sparse_mathematical_core("expm1", stype,
lambda x: mx.sym.sparse.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log10
check_sparse_mathematical_core("log10", stype,
lambda x: mx.sym.sparse.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log2
check_sparse_mathematical_core("log2", stype,
lambda x: mx.sym.sparse.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# rint
check_sparse_mathematical_core("rint", stype,
lambda x: mx.sym.sparse.rint(x),
lambda x: np.rint(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# fix
check_sparse_mathematical_core("fix", stype,
lambda x: mx.sym.sparse.fix(x),
lambda x: np.fix(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
        import_succeeded = False
        try:
            from scipy import special as scipy_special
            import_succeeded = True
# gamma
check_sparse_mathematical_core("gamma", stype,
lambda x: mx.sym.sparse.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# gammaln
check_sparse_mathematical_core("gammaln", stype,
lambda x: mx.sym.sparse.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
except:
if import_succeeded == False:
print("Could not import scipy. Skipping unit tests for special functions")
else:
raise
for i in range(1):
print("pass", i)
for density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
for force_overlap in [False, True]:
print("{}, {}, {}".format(density, ograd_density, force_overlap))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Check unary ops (unary fwd, binary bwd)
check_mathematical_core('default', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='row_sparse',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='csr',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# Check binary with scalar ops
check_binary_op_with_scalar('default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse', output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
output_grad_stype='row_sparse',
density=density, ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='csr',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
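# Illustrative sketch, not part of the original suite: the core pattern exercised by
# check_sparse_mathematical_core above is to run a sparse unary operator symbolically and
# compare its forward output against a NumPy reference. The helper name and the choice of
# sinh/shape/density are arbitrary; only ops and utilities already used in this module are
# assumed. The leading underscore keeps test runners from collecting it.
def _example_sparse_unary_check():
    data = mx.symbol.Variable('data', stype='csr')
    sym = mx.sym.sparse.sinh(data)
    csr_nd = rand_ndarray((10, 10), 'csr', density=0.5)
    check_symbolic_forward(sym, {'data': csr_nd}, [np.sinh(csr_nd.asnumpy())])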
def test_elemwise_add_ex():
def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_grad_stype=None):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
out_np = lhs_np + rhs_np
test = mx.symbol.sparse.elemwise_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_np])
check_numeric_gradient(test, location)
grad_stypes = {}
if lhs_grad_stype is not None and lhs_grad_stype != 'default':
grad_stypes['lhs'] = lhs_grad_stype
if rhs_grad_stype is not None and rhs_grad_stype != 'default':
grad_stypes['rhs'] = rhs_grad_stype
check_symbolic_backward(test, location, [out_np], [out_np, out_np],
grad_stypes=grad_stypes)
shapes = [rand_shape_2d(), rand_shape_3d()]
for shape in shapes:
check_elemwise_add_ex('default', 'default', shape)
check_elemwise_add_ex('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse')
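# Illustrative sketch, not part of the original suite: the ndarray-level counterpart of the
# symbolic sparse elemwise_add tested above; its values should match plain NumPy addition.
# Shapes and densities are arbitrary, and mx.nd.sparse.elemwise_add is assumed to mirror the
# symbol op used in test_elemwise_add_ex.
def _example_sparse_elemwise_add_nd():
    a = rand_ndarray((5, 7), 'row_sparse', density=0.4)
    b = rand_ndarray((5, 7), 'row_sparse', density=0.4)
    out = mx.nd.sparse.elemwise_add(a, b)
    assert_almost_equal(out.asnumpy(), a.asnumpy() + b.asnumpy())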
def test_cast_storage_ex():
def check_cast_storage(shape, density, from_stype, to_stype, check_numeric_grad=True):
x = mx.symbol.Variable('x', stype=from_stype)
x_nd = rand_ndarray(shape, from_stype, density=density)
x_np = x_nd.asnumpy()
out_np = x_np
test = mx.symbol.cast_storage(x, stype=to_stype)
location = {'x': x_nd}
check_symbolic_forward(test, location, [out_np])
        # consider disabling the numeric grad check for gpu block kernel since the input is large
if check_numeric_grad:
check_numeric_gradient(test, location)
grad_stypes = {'x': to_stype}
check_symbolic_backward(test, location, [out_np], [out_np], grad_stypes=grad_stypes)
density = [1.00, 0.50, 0.01]
for d in density:
shape_2d = rand_shape_2d()
shape_3d = rand_shape_3d()
check_cast_storage(shape_2d, d, 'csr', 'default')
check_cast_storage(shape_2d, d, 'default', 'csr')
check_cast_storage(shape_2d, d, 'row_sparse', 'default')
check_cast_storage(shape_2d, d, 'default', 'row_sparse')
check_cast_storage(shape_3d, d, 'row_sparse', 'default')
check_cast_storage(shape_3d, d, 'default', 'row_sparse')
for i in range(4, 6):
shape = rand_shape_nd(i, 5)
check_cast_storage(shape, d, 'default', 'row_sparse')
check_cast_storage(shape, d, 'row_sparse', 'default')
# Test specific gpu kernels
        if default_context().device_type == 'gpu':
dim0 = rnd.randint(1, 10)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'csr')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'csr')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'csr',
check_numeric_grad=False)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'row_sparse')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'row_sparse')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'row_sparse',
check_numeric_grad=False)
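# Illustrative sketch, not part of the original suite: cast_storage converts between storage
# types without changing values, which is the invariant check_cast_storage above relies on.
# The helper name and shape are arbitrary.
def _example_cast_storage_roundtrip():
    dense = rand_ndarray((8, 6), 'default')
    csr = mx.nd.cast_storage(dense, stype='csr')
    back = mx.nd.cast_storage(csr, stype='default')
    assert csr.stype == 'csr'
    assert_almost_equal(dense.asnumpy(), back.asnumpy())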
def test_sparse_dot():
def test_dot_csr(lhs_shape, rhs_shape, rhs_stype, trans_lhs, lhs_density, rhs_density):
lhs_nd = rand_ndarray(lhs_shape, 'csr', density=lhs_density, shuffle_csr_indices=False)
lhs_dns = lhs_nd.tostype('default')
rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_density)
rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.tostype('default')
out = mx.nd.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs)
out_dns = mx.nd.dot(lhs_dns, rhs_dns, transpose_a=trans_lhs)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-4, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='csr')
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_dns, out_dns, transpose_a=backward_trans).asnumpy()
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
def test_dot_dns_csr(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_lhs=False, trans_rhs=False):
lhs_nd = rand_ndarray(lhs_shape, stype='default', density=lhs_density)
rhs_nd = rand_ndarray(rhs_shape, stype='csr', density=rhs_density)
rhs_dns = rhs_nd.tostype('default')
out = mx.nd.sparse.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs, transpose_b=trans_rhs)
out_dns = mx.nd.dot(lhs_nd, rhs_dns, transpose_a=trans_lhs, transpose_b=trans_rhs)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-4, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='default')
rhs = mx.symbol.Variable('rhs', stype='csr')
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs, transpose_b=trans_rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_nd, out_dns, transpose_a=backward_trans).asnumpy()
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
def test_sparse_dot_zero_output(lhs_shape, trans_lhs, rhs_num_cols):
"""Test for nnr_out = 0. Before the fix, the test would fail."""
lhs = mx.nd.zeros(lhs_shape)
irow = np.random.randint(0, lhs_shape[0])
icol = np.random.randint(0, lhs_shape[1])
lhs[irow, icol] = 1.0
if trans_lhs:
rhs = rand_ndarray(shape=(lhs_shape[0], rhs_num_cols), stype='default')
rhs[irow, :] = 0
else:
rhs = rand_ndarray(shape=(lhs_shape[1], rhs_num_cols), stype='default')
rhs[icol, :] = 0
dns_out = mx.nd.dot(lhs, rhs, transpose_a=trans_lhs)
assert mx.nd.sum(mx.nd.abs(dns_out)).asscalar() == 0
sps_out = mx.nd.sparse.dot(lhs.tostype('csr'), rhs.tostype('row_sparse'), transpose_a=trans_lhs)
assert same(dns_out.asnumpy(), sps_out.asnumpy())
density = [1.00, 0.50, 0.01]
for lhs_d in density:
lhs_shape = rand_shape_2d(50, 200)
rhs_d = 1
test_dot_csr(lhs_shape, (lhs_shape[1], 1), 'default', False, lhs_d, rhs_d) # test gpu SpMV
test_dot_csr(lhs_shape, (lhs_shape[0], 1), 'default', True, lhs_d, rhs_d) # (vector kernel)
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(5, 10)), 'default', False, lhs_d, rhs_d) # test gpu SpMM
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(5, 10)), 'default', True, lhs_d, rhs_d) # (scalar kernel)
test_dot_dns_csr(lhs_shape, (lhs_shape[1], rnd.randint(50, 200)), lhs_d, lhs_d)
for rhs_d in density:
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(1, 10)), 'row_sparse', False, lhs_d, rhs_d)
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(1, 10)), 'row_sparse', True, lhs_d, rhs_d)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), False, 40)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), True, 40)
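# Illustrative sketch, not part of the original suite: mx.nd.sparse.dot with a csr lhs should
# agree with the equivalent dense dot product, which is what test_dot_csr above verifies
# through the symbolic API. Shapes, density and tolerances are arbitrary.
def _example_sparse_dot_matches_dense():
    lhs = rand_ndarray((16, 32), 'csr', density=0.2)
    rhs = rand_ndarray((32, 8), 'default')
    sparse_out = mx.nd.sparse.dot(lhs, rhs)
    dense_out = mx.nd.dot(lhs.tostype('default'), rhs)
    assert_almost_equal(sparse_out.asnumpy(), dense_out.asnumpy(), rtol=1e-4, atol=1e-5)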
def test_sparse_slice():
    def check_csr_slice(shape, slice_input):
        storage_type = 'csr'
        B, _ = rand_sparse_ndarray(shape, storage_type)
        B_np = B.asnumpy()  # avoid shadowing the numpy module with a local named np
        begin = rnd.randint(0, B.shape[0] - 1)
        end = rnd.randint(begin + 1, B.shape[0])
        nd_slice = mx.nd.crop(B, begin=begin, end=end)
        assert same(nd_slice.asnumpy(), B_np[begin:end]), (nd_slice.asnumpy(), B_np[begin:end])
shape = (rnd.randint(7, 15), rnd.randint(1, 10))
check_csr_slice(shape, True)
check_csr_slice(shape, False)
def test_sparse_retain():
def check_sparse_retain(shape, density, index_type=np.int64):
num_rows = shape[0]
rsp, _ = rand_sparse_ndarray(shape=shape, stype='row_sparse', density=density)
length = np.random.randint(1, num_rows + 1)
idx = random_sample(list(range(0, num_rows)), length)
idx.sort()
dns = rsp.asnumpy()
tensor_retained_expected = np.zeros(shape)
for i in idx:
tensor_retained_expected[i][:] = dns[i]
indices = mx.nd.array(idx, dtype=index_type)
rsp_retained = mx.nd.sparse.retain(rsp, indices=indices)
assert same(tensor_retained_expected, rsp_retained.asnumpy())
# check numeric gradient
data = mx.symbol.Variable('data')
idx = mx.symbol.Variable('indices')
sym = mx.sym.sparse.retain(data=data, indices=idx)
check_numeric_gradient(sym, [rsp, indices], grad_nodes=['data'],
grad_stype_dict={'data': 'row_sparse'})
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
densities = [0.01, 0.5, 1.0]
index_types = [np.float32, np.int32, np.int64]
for density in densities:
for itype in index_types:
check_sparse_retain(shape, density, itype)
check_sparse_retain(shape_3d, density, itype)
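# Illustrative sketch, not part of the original suite: sparse.retain keeps only the requested
# rows of a row_sparse array and drops everything else, which is the expectation encoded in
# check_sparse_retain above. The concrete values are arbitrary.
def _example_sparse_retain_rows():
    dense = np.arange(12).reshape(4, 3)
    rsp = mx.nd.array(dense).tostype('row_sparse')
    kept = mx.nd.sparse.retain(rsp, indices=mx.nd.array([0, 2]))
    expected = np.zeros((4, 3))
    expected[[0, 2]] = dense[[0, 2]]
    assert same(kept.asnumpy(), expected)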
def test_sparse_unary_with_numerics():
def check_sparse_simple(name, stype, mxnet_func, forward_numpy_call,
backward_numpy_call, output_grad_stype=None,
backward_is_use_output=False):
if output_grad_stype is None:
output_grad_stype = stype
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype, backward_numpy_call, output_grad_stype)
if backward_is_use_output is True:
expected_grad_result_type = expected_result_type
shape = (3, 4)
data = mx.symbol.Variable("data")
grad_stypes = {'data' : expected_grad_result_type}
y = mxnet_func(data)
if stype == 'default':
xa = np.random.uniform(low=-1.0, high=1.0, size=shape)
xa_np = xa
else:
xa = create_sparse_array(shape, stype, data_init=None, rsp_indices=[1],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
xa_np = xa.asnumpy()
if output_grad_stype != 'default':
out_grad = create_sparse_array(shape, output_grad_stype, data_init=None,
rsp_indices=[1, 2],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
out_grad_np = out_grad.asnumpy()
else:
out_grad_np = np.ones(xa.shape)
out_grad = mx.nd.array(out_grad_np)
output_np = forward_numpy_call(xa_np)
input_grad_np = backward_numpy_call(output_np, out_grad_np)
outputs = check_symbolic_forward(y, [xa], [output_np])
output = outputs[0]
assert output.stype == expected_result_type
input_grad_dict = check_symbolic_backward(y, location=[xa], out_grads=[out_grad],
expected=[input_grad_np],
grad_stypes=grad_stypes)
inp_grad = input_grad_dict["data"]
assert inp_grad.stype == expected_grad_result_type
def check_sparse_function(name, mxnet_func, forward_numpy_call, backward_numpy_call,
backward_is_use_output=False):
check_sparse_simple(name, 'default', mxnet_func, forward_numpy_call, backward_numpy_call)
for output_grad_stype in [None, "row_sparse", "default"]:
check_sparse_simple(name, 'row_sparse', mxnet_func, forward_numpy_call, backward_numpy_call,
output_grad_stype=output_grad_stype,
backward_is_use_output=backward_is_use_output)
check_sparse_function('relu',
lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.0),
lambda output, outg: outg * assign_each(output, lambda x: x > 0.0), backward_is_use_output=True)
check_sparse_function('sigmoid',
lambda x: mx.sym.sigmoid(x),
lambda x: np.divide(1.0, (1.0 + np.exp(-x))),
lambda output, outg: outg * assign_each(output, lambda x: x * (1.0 - x)),
backward_is_use_output=True)
def test_sparse_nd_zeros():
def check_sparse_nd_zeros(stype, shape):
zero = mx.nd.zeros(shape)
sparse_zero = mx.nd.zeros(shape=shape, stype=stype)
assert_almost_equal(sparse_zero.asnumpy(), zero.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros('row_sparse', shape)
check_sparse_nd_zeros('csr', shape)
check_sparse_nd_zeros('default', shape)
def test_sparse_nd_zeros_like():
def check_sparse_nd_zeros_like(stype, shape):
zero = mx.nd.zeros(shape, stype=stype)
zero_like = mx.nd.sparse.zeros_like(zero)
assert_almost_equal(zero.asnumpy(), zero_like.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros_like('row_sparse', shape)
check_sparse_nd_zeros_like('csr', shape)
def test_sparse_axis_operations():
def test_variations(func_name):
dim0 = 30
dim1 = 100
axes = [0, 1]
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=density)
dns = csr_array.tostype('default')
for axis in axes:
ret = func_name(csr_array, axis=axis)
assert ret.stype == 'default'
ret_expected = func_name(dns, axis=axis)
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
def test_fallback(func_name, axis=0, keepdims=True, exclude=True):
dim0 = 30
dim1 = 100
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=0.01)
ret= func_name(csr_array, axis=axis, keepdims=keepdims,
exclude=exclude)
test_variations(mx.nd.sum)
test_fallback(mx.nd.sum, axis=0, keepdims=True, exclude=True)
test_variations(mx.nd.mean)
test_fallback(mx.nd.mean, axis=0, keepdims=True, exclude=True)
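# Illustrative sketch, not part of the original suite: as asserted in test_variations above,
# axis reductions over a csr input come back with dense (default) storage. Shape and density
# are arbitrary.
def _example_csr_axis_reduction_returns_dense():
    csr_array = rand_ndarray((20, 50), 'csr', density=0.1)
    ret = mx.nd.sum(csr_array, axis=0)
    assert ret.stype == 'default'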
def test_sparse_square_sum():
dim0 = 30
dim1 = 30
axes = [0, 1]
keepdims = [False, True]
densities = [0, 0.01, 0.2, 0.5, 1.0]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
rsp = rand_ndarray(shape, 'row_sparse', density)
dns = rsp.tostype('default')
for axis in axes:
for keepdim in keepdims:
ret = mx.nd._internal._square_sum(rsp, axis=axis, keepdims=keepdim)
if axis == 1 and keepdim:
assert ret.stype == 'row_sparse'
else:
assert ret.stype == 'default'
ret_expected = mx.nd.sum(dns*dns, axis=axis, keepdims=keepdim)
# check forward result
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
rsp_data = mx.sym.Variable('data', stype='row_sparse')
test = mx.symbol._internal._square_sum(rsp_data, axis=axis, keepdims=keepdim)
# check symbolic backward since ograd can be an rsp
# and cannot be checked through check_numeric_gradient
# because it will add a loss layer as the output layer
# which makes ograd of the square_sum dense
if axis == 1 and keepdim:
dns_data = mx.sym.Variable('data')
baseline = mx.sym.sum(mx.sym.square(dns_data), axis=axis, keepdims=keepdim)
igrad_expected = mx.nd.empty(dns.shape)
baseline_exec = baseline.bind(default_context(), args=[dns],
args_grad=[igrad_expected])
baseline_exec.forward(is_train=True)
baseline_exec.backward([ret_expected])
# check backward when ograd is row sparse
check_symbolic_backward(test, [rsp], [ret_expected.tostype('row_sparse')],
[igrad_expected.asnumpy()], grad_stypes={'data': 'row_sparse'})
# check backward when ograd is dense
                    # the stype of output of the square_sum is determined in symbol binding stage.
# The ograd stype of the last layer is the same as the output stype of the last layer.
# Need to add one more layer after square_sum to trigger the kernel for ograd
# with default stype in square_sum op.
baseline1 = baseline + 1
baseline_exec1 = baseline1.bind(default_context(), args=[dns],
args_grad=[igrad_expected])
baseline_exec1.forward(is_train=True)
baseline_exec1.backward([ret_expected])
test1 = test + 1
check_symbolic_backward(test1, [rsp], [ret_expected], [igrad_expected.asnumpy()],
grad_stypes={'data': 'row_sparse'})
# check numeric gradient
check_numeric_gradient(test, [rsp], grad_stype_dict={'data': 'row_sparse'},
atol=1e-2, rtol=0.1)
def test_sparse_storage_fallback():
""" test operators which don't implement FComputeEx or FStatefulComputeEx """
def check_broadcast_add(shape, lhs_stype, rhs_stype):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_dns = mx.nd.cast_storage(lhs_nd, stype='default')
rhs_dns = mx.nd.cast_storage(rhs_nd, stype='default')
out_dns = (lhs_dns + rhs_dns).asnumpy()
test = mx.symbol.broadcast_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_dns])
check_numeric_gradient(test, location)
check_symbolic_backward(test, location, [out_dns], [out_dns, out_dns])
def np_softmax(x, axis=-1):
# fix for old numpy on Travis not supporting keepdims
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_softmax_with_shape(lhs_stype, rhs_stype, shape, preserve_shape=False):
# bind with label
ctx = default_context()
X = mx.symbol.Variable('X', stype=lhs_stype)
L = mx.symbol.Variable('L', stype=rhs_stype)
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = rand_ndarray(shape, lhs_stype)
l = rand_ndarray(shape, rhs_stype)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx=ctx)
exec1 = Y.bind(ctx, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=1e-4)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(),
rtol=1e-3, atol=1e-4)
def check_concat(shape, lhs_stype, rhs_stype):
x = mx.symbol.Variable('x', stype=lhs_stype)
w = mx.symbol.Variable('w', stype=rhs_stype)
test = mx.sym.Concat(x, w)
x_nd = rand_ndarray(shape, lhs_stype)
w_nd = rand_ndarray(shape, rhs_stype)
location = {'x': x_nd, 'w': w_nd}
check_numeric_gradient(test, location)
def check_operator_with_temp_resource(shape, stype):
x = mx.symbol.Variable('x', stype=stype)
test = mx.sym.sum(x)
x_nd = rand_ndarray(shape, stype)
location = {'x': x_nd}
check_numeric_gradient(test, location)
shape = rand_shape_2d()
stypes = ['default', 'csr', 'row_sparse']
for lhs in stypes:
check_operator_with_temp_resource(shape, lhs)
for rhs in stypes:
check_broadcast_add(shape, lhs, rhs)
check_concat(shape, lhs, rhs)
check_softmax_with_shape(lhs, rhs, shape, preserve_shape=False)
check_softmax_with_shape(rhs, rhs, shape, preserve_shape=True)
def test_sparse_elementwise_sum():
def check_sparse_elementwise_sum_with_shape(stype, shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.sparse.add_n(*inputs, name='esum')
arr = []
arr_grad = [mx.nd.empty(shape, stype=stype) for _ in range(n)]
densities = [0, 0.01, 0.5, 1.0]
for i in range(n):
arr.append(rand_ndarray(shape, stype, densities[np.random.randint(0, len(densities))]))
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy())
for dim in range(2, 4):
shape = tuple(np.random.randint(5, 10, size=dim))
check_sparse_elementwise_sum_with_shape('row_sparse', shape, np.random.randint(1, 9))
def test_sparse_embedding():
''' test sparse embedding op on cpu '''
def check_sparse_embedding(executor, weight_ref, data_onehot, grad, density):
# update weight based on density
weight[:] = rand_ndarray(weight.shape, 'row_sparse', density=density)
# check forward
executor.forward(is_train=True)
assert_almost_equal(executor.outputs[0].asnumpy(), np.dot(data_onehot, weight.asnumpy()))
# check backward
executor.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(data_onehot.T, grad.asnumpy()))
densities = [0, 0.5, 1]
in_dim = 50
out_dim = 3
batch = 8
# init executor
data = mx.sym.Variable("data")
weight = mx.sym.Variable("embed_weight", stype='row_sparse')
embed = mx.sym.contrib.SparseEmbedding(data=data, weight=weight, input_dim=in_dim,
output_dim=out_dim, name="embed")
grad_req = {'data': 'null', 'embed_weight': 'write'}
exe_test = embed.simple_bind(default_context(), grad_req=grad_req, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
# init data
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
arg_map["data"][:] = np_data
# init grad
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.sparse.zeros('row_sparse', np_grad.shape)
grad[:] = np_grad
# weight
weight = arg_map["embed_weight"]
for density in densities:
check_sparse_embedding(exe_test, weight, np_onehot, grad, density)
def test_scatter_ops():
def csr_get_seen_points(name, csr_array, verbose=False):
"""Get a unique list of points int he CSR array as well as a
corresponding parallel list of points and values"""
seen_points = set()
seen_point_list = list()
values = list()
row_count = csr_array.shape[0]
row_pointers = csr_array.indptr.asnumpy()
col_indexes = csr_array.indices.asnumpy()
data = csr_array.data.asnumpy()
for row in range(row_count):
start_pos = row_pointers[row]
end_pos = row_pointers[row + 1]
for col_index in range(start_pos, end_pos):
col = col_indexes[col_index]
val = data[col_index]
if verbose is True:
print("{}: (row, col = ({}, {}) = {}".format(name, row, col, val))
seen_points.add((row, col))
seen_point_list.append((row, col))
values.append(val)
return seen_points, values, seen_point_list
def check_scatter_ops(name, shape, lhs_stype, rhs_stype, forward_mxnet_call, forward_numpy_call,
density=0.25, rhs_is_scalar=False, verbose=False):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
if rhs_is_scalar is False:
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
if verbose is True:
print(name)
if lhs_stype != 'default':
lhs_nd = create_sparse_array_zd(
shape, lhs_stype, density=density,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
                    force_indices=[(shape[0]//2)]  # force at least one overlap
))
else:
lhs_nd = rand_ndarray(shape, 'default')
if rhs_is_scalar is False:
if rhs_stype != 'default':
rhs_nd = create_sparse_array_zd(
shape, rhs_stype, density=density,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
                        force_indices=[(shape[0]//2)]  # force at least one overlap
))
else:
rhs_nd = rand_ndarray(shape, 'default')
else:
rhs_nd = 9
rhs = rhs_nd
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd if rhs_is_scalar is True else rhs_nd.asnumpy()
if verbose is True:
print("lhs = {}".format(lhs_np))
print("rhs = {}".format(rhs_np))
out_np = forward_numpy_call(lhs_np, rhs_np)
if verbose is True:
print("Numpy: out_np = {}".format(out_np))
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
out = forward_mxnet_call(lhs, rhs)
exe_test = out.bind(default_context(), args=location)
exe_test.forward(is_train=False)
out_nd = exe_test.outputs[0]
if verbose is True:
print("Sym: out_nd = {}".format(out_nd.asnumpy()))
# For row_sparse, check that rows only exist for rows that are
# either int lhs or rhs, and if they exist, they should equal
# the numpy values
if lhs_stype == 'default':
            assert almost_equal(out_nd.asnumpy(), out_np, equal_nan=True)
elif lhs_stype == 'row_sparse':
seen_rows = set()
indices = lhs_nd.indices.asnumpy()
for i in range(len(indices)):
seen_rows.add(indices[i])
assert len(out_nd.indices.asnumpy()) == len(seen_rows)
out_nd_np = out_nd.asnumpy()
for row in seen_rows:
row_nd = out_nd_np[row]
row_np = out_np[row]
                assert almost_equal(row_nd, row_np, equal_nan=True)
elif lhs_stype == 'csr' and rhs_is_scalar is False:
            assert almost_equal(out_nd.asnumpy(), out_np, equal_nan=True)
else:
assert rhs_is_scalar
lhs_seen_points, _, _ = csr_get_seen_points("lhs", lhs_nd, verbose)
if rhs_is_scalar is False:
rhs_seen_points, _, _ = csr_get_seen_points("rhs", rhs_nd, verbose)
else:
rhs_seen_points = set()
input_seen_points = lhs_seen_points.union(rhs_seen_points)
        out_seen_points, out_values, seen_point_list = csr_get_seen_points("out_nd", out_nd, verbose)
        # Some may have been zero
        assert len(out_seen_points) <= len(input_seen_points)
out_nd_np = out_nd.asnumpy()
val_index = 0
for row_col in seen_point_list:
row = row_col[0]
col = row_col[1]
val = out_values[val_index]
val_np = out_nd_np[row, col]
            assert almost_equal(val, val_np, equal_nan=True)
val_index += 1
shape = (10, 5)
for lhs_stype in ['row_sparse', 'default', 'csr']:
for rhs_stype in ['row_sparse', 'default', 'csr']:
print("op: {}, lhs_stype: {}, rhs_stype: {}".format('_scatter_elemwise_div',
lhs_stype, rhs_stype))
check_scatter_ops('_scatter_elemwise_div', shape, lhs_stype, rhs_stype,
lambda l, r: mx.sym._internal._scatter_elemwise_div(l, r),
lambda l, r: l / r,
verbose=False)
for lhs_stype in ['row_sparse', 'default', 'csr']:
print("op: {}, lhs_stype: {}".format('_scatter_plus', lhs_stype))
check_scatter_ops('_scatter_plus', shape, lhs_stype, 'scalar',
lambda l, r: mx.sym._internal._scatter_plus_scalar(l, r),
lambda l, r: l + r,
rhs_is_scalar=True, verbose=False)
print("op: {}, lhs_stype: {}".format('_scatter_minus', lhs_stype))
check_scatter_ops('_scatter_minus', shape, lhs_stype, 'scalar',
lambda l, r: mx.sym._internal._scatter_minus_scalar(l, r),
                          lambda l, r: l - r,
rhs_is_scalar=True, verbose=False, density=0.5)
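# Illustrative sketch, not invoked by the suite: csr_get_seen_points above walks the standard
# CSR triplet, where indptr delimits each row's slice of indices (columns) and data (values).
# A tiny hand-built matrix makes that layout explicit; the helper name is arbitrary.
def _example_csr_triplet_layout():
    dense = np.array([[0, 7, 0],
                      [0, 0, 0],
                      [5, 0, 9]], dtype='float32')
    csr_array = mx.nd.array(dense).tostype('csr')
    # row i occupies positions indptr[i]:indptr[i + 1] of both indices and data
    assert list(csr_array.indptr.asnumpy()) == [0, 1, 1, 3]
    assert list(csr_array.indices.asnumpy()) == [1, 0, 2]
    assert list(csr_array.data.asnumpy()) == [7.0, 5.0, 9.0]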
if __name__ == '__main__':
import nose
nose.runmodule()
| 48.341828 | 122 | 0.506022 |
275bc96bafc6a0d01aaaf36a844cd302cc146fea | 4,062 | py | Python | ttwidgets/__init__.py | GaryBloom/ttwidgets | 7a067be9a3dd96fc2cf812d0db34428503832737 | [
"Apache-2.0"
] | 2 | 2020-12-11T15:01:04.000Z | 2021-03-12T12:49:26.000Z | ttwidgets/__init__.py | GaryBloom/ttwidgets | 7a067be9a3dd96fc2cf812d0db34428503832737 | [
"Apache-2.0"
] | null | null | null | ttwidgets/__init__.py | GaryBloom/ttwidgets | 7a067be9a3dd96fc2cf812d0db34428503832737 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 Gary Michael Bloom
mailto:bloominator@hotmail.com
mailto:GaryBloomLaw@gmail.com
TTWidgets - Tagged-Text Multi-Font Enhanced Tkinter Widgets
=========
Package **TTWidgets** improves the Button, Label, and Listbox widgets of the
Tkinter library, and provides a bonus ToolTip widget, all with support for
multiple fonts and visual schemes via *tagged-text* input.
The standard Tkinter Button and Label widgets are limited to a single set
of widget options (e.g. background/bg, foreground/fg, bitmap, cursor,
relief, etc), including a SINGLE FONT. With the **TTWidgets** enhancements,
*tagged text* can be passed in as text to create a compound widget, with
multiple fonts and visual option sets, that behaves like a simple widget.
The Button and Label implementations are complete.
The Listbox implementation is partial: there is no multi-font support, but
the user can use the new 'text' option to pass in *tagged text* to define all
the elements with visual schemes. The user can also pass *tagged text* to the
insert() method, and thereby skip the secondary call to itemconfig() for the
inserted element(s).
A full multi-font implementation of a Listbox may come later.
As a bonus, a ToolTip widget is included, which also accepts *tagged text*
as an input option, allowing the creation of colorful and multi-font ToolTips.
As a **TTWidgets** Button example:
button = ttwidgets.TTButton(text="Isn't a <t case=title relief=raised bd=1 bg=white padx=2>button</t> with <tag bold fg=red bg=yellow>bold red on yellow text</tag> and \na <tag fg=blue funderline>hyperlink</tag> <tag relief=groove bd=2>groovy</tag> <tag bitmap=warning/>", command=lambda e=None: print("Released!"), bg='lightgray')
gives a Button with text "Isn't a Button with bold red on yellow text and
a hyperlink groovy!", where:
- "Button" appears inside a raised button (with "title" case),
- "bold red on yellow text" is bold red text on a yellow background,
- "hyperlink" is blue and underlined,
- "groovy" is in a box with 'groove' relief, and
- "!" is a warning bitmap.
Here are two versions of the same button. Which one would YOU rather use? :)

For an overview of *tagged text*, please see the ttwidgets.TTWidget help.
This package includes the following:
CLASSES:
TTWidget (base class for TTButton and TTLabel)
TTButton (inherits from TTWidget)
TTLabel (inherits from TTWidget)
TTListbox (inherits from Tkinter.Listbox)
TTToolTip (does not inherit, but uses a TTLabel)
METHODS:
alias
convert_font_dict_to_ttoptions_dict
convert_ttoptions_dict_to_font_dict
dump
gen_tag_attrs
get_font_dict
get_named_font
is_tagged_text
pare_dict
parse_tag_attrs
quote
split_attrs
split_chunk
split_dict_into_options_fontattrs_and_case
split_tagged_text_into_chunks
strip_tags
unalias
unmap
update_named_font
wrap_tagged_text
Compatability:
Py 3.5 - 3.9 is supported.
Py 3.4 and earlier are unsupported
Copyright 2020 Gary Michael Bloom
mailto:bloominator@hotmail.com
mailto:GaryBloomLaw@gmail.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .ttwidgets import *
__version__ = "1.0.5"
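# Minimal usage sketch (illustrative only; assumes TTButton is re-exported by the star import
# above, as the module docstring indicates). It mirrors the docstring's tagged-text example.
# Call _demo() explicitly to try it; nothing runs on import.
def _demo():  # pragma: no cover
    import tkinter as tk
    root = tk.Tk()
    button = TTButton(
        root,
        text="Press <tag bold fg=red bg=yellow>here</tag> to continue",
        command=lambda e=None: print("Released!"),
        bg="lightgray")
    button.pack(padx=10, pady=10)
    root.mainloop()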
| 39.436893 | 335 | 0.718119 |
4026da48cb9a13823ff5dd9bc2f605977edaac3f | 23,270 | py | Python | ironic/drivers/modules/redfish/management.py | pgaxatte/ironic | 4af622db2fa68e8302d385357fef793c9b0c9b9a | [
"Apache-2.0"
] | 1 | 2019-07-29T04:57:15.000Z | 2019-07-29T04:57:15.000Z | ironic/drivers/modules/redfish/management.py | pgaxatte/ironic | 4af622db2fa68e8302d385357fef793c9b0c9b9a | [
"Apache-2.0"
] | 13 | 2019-04-12T21:55:36.000Z | 2020-05-26T14:07:13.000Z | ironic/drivers/modules/redfish/management.py | cdearborn/ironic | 4d41af5038369b31ae44e18d2cbf2e311a8bc7fa | [
"Apache-2.0"
] | 11 | 2019-04-10T04:43:33.000Z | 2020-01-31T10:34:14.000Z | # Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_log import log
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import boot_modes
from ironic.common import components
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import indicator_states
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules.redfish import utils as redfish_utils
LOG = log.getLogger(__name__)
sushy = importutils.try_import('sushy')
if sushy:
BOOT_DEVICE_MAP = {
sushy.BOOT_SOURCE_TARGET_PXE: boot_devices.PXE,
sushy.BOOT_SOURCE_TARGET_HDD: boot_devices.DISK,
sushy.BOOT_SOURCE_TARGET_CD: boot_devices.CDROM,
sushy.BOOT_SOURCE_TARGET_BIOS_SETUP: boot_devices.BIOS
}
BOOT_DEVICE_MAP_REV = {v: k for k, v in BOOT_DEVICE_MAP.items()}
BOOT_MODE_MAP = {
sushy.BOOT_SOURCE_MODE_UEFI: boot_modes.UEFI,
sushy.BOOT_SOURCE_MODE_BIOS: boot_modes.LEGACY_BIOS
}
BOOT_MODE_MAP_REV = {v: k for k, v in BOOT_MODE_MAP.items()}
BOOT_DEVICE_PERSISTENT_MAP = {
sushy.BOOT_SOURCE_ENABLED_CONTINUOUS: True,
sushy.BOOT_SOURCE_ENABLED_ONCE: False
}
BOOT_DEVICE_PERSISTENT_MAP_REV = {v: k for k, v in
BOOT_DEVICE_PERSISTENT_MAP.items()}
INDICATOR_MAP = {
sushy.INDICATOR_LED_LIT: indicator_states.ON,
sushy.INDICATOR_LED_OFF: indicator_states.OFF,
sushy.INDICATOR_LED_BLINKING: indicator_states.BLINKING,
sushy.INDICATOR_LED_UNKNOWN: indicator_states.UNKNOWN
}
INDICATOR_MAP_REV = {
v: k for k, v in INDICATOR_MAP.items()}
class RedfishManagement(base.ManagementInterface):
def __init__(self):
"""Initialize the Redfish management interface.
:raises: DriverLoadError if the driver can't be loaded due to
missing dependencies
"""
super(RedfishManagement, self).__init__()
if not sushy:
raise exception.DriverLoadError(
driver='redfish',
reason=_('Unable to import the sushy library'))
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return redfish_utils.COMMON_PROPERTIES.copy()
def validate(self, task):
"""Validates the driver information needed by the redfish driver.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue on malformed parameter(s)
:raises: MissingParameterValue on missing parameter(s)
"""
redfish_utils.parse_driver_info(task.node)
def get_supported_boot_devices(self, task):
"""Get a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(BOOT_DEVICE_MAP_REV)
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for a node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue on malformed parameter(s)
:raises: MissingParameterValue on missing parameter(s)
:raises: RedfishConnectionError when it fails to connect to Redfish
:raises: RedfishError on an error from the Sushy library
"""
system = redfish_utils.get_system(task.node)
desired_persistence = BOOT_DEVICE_PERSISTENT_MAP_REV[persistent]
current_persistence = system.boot.get('enabled')
# NOTE(etingof): this can be racy, esp if BMC is not RESTful
enabled = (desired_persistence
if desired_persistence != current_persistence else None)
try:
system.set_system_boot_options(
BOOT_DEVICE_MAP_REV[device], enabled=enabled)
except sushy.exceptions.SushyError as e:
error_msg = (_('Redfish set boot device failed for node '
'%(node)s. Error: %(error)s') %
{'node': task.node.uuid, 'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
def get_boot_device(self, task):
"""Get the current boot device for a node.
:param task: a task from TaskManager.
:raises: InvalidParameterValue on malformed parameter(s)
:raises: MissingParameterValue on missing parameter(s)
:raises: RedfishConnectionError when it fails to connect to Redfish
:raises: RedfishError on an error from the Sushy library
:returns: a dictionary containing:
:boot_device:
the boot device, one of :mod:`ironic.common.boot_devices` or
None if it is unknown.
:persistent:
Boolean value or None, True if the boot device persists,
False otherwise. None if it's unknown.
"""
system = redfish_utils.get_system(task.node)
return {'boot_device': BOOT_DEVICE_MAP.get(system.boot.get('target')),
'persistent': BOOT_DEVICE_PERSISTENT_MAP.get(
system.boot.get('enabled'))}
def get_supported_boot_modes(self, task):
"""Get a list of the supported boot modes.
:param task: A task from TaskManager.
:returns: A list with the supported boot modes defined
in :mod:`ironic.common.boot_modes`. If boot
mode support can't be determined, empty list
is returned.
"""
return list(BOOT_MODE_MAP_REV)
@task_manager.require_exclusive_lock
def set_boot_mode(self, task, mode):
"""Set the boot mode for a node.
Set the boot mode to use on next reboot of the node.
:param task: A task from TaskManager.
:param mode: The boot mode, one of
:mod:`ironic.common.boot_modes`.
:raises: InvalidParameterValue if an invalid boot mode is
specified.
:raises: MissingParameterValue if a required parameter is missing
:raises: RedfishConnectionError when it fails to connect to Redfish
:raises: RedfishError on an error from the Sushy library
"""
system = redfish_utils.get_system(task.node)
try:
system.set_system_boot_options(mode=BOOT_MODE_MAP_REV[mode])
except sushy.exceptions.SushyError as e:
error_msg = (_('Setting boot mode to %(mode)s '
'failed for node %(node)s. '
'Error: %(error)s') %
{'node': task.node.uuid, 'mode': mode,
'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
def get_boot_mode(self, task):
"""Get the current boot mode for a node.
Provides the current boot mode of the node.
:param task: A task from TaskManager.
:raises: MissingParameterValue if a required parameter is missing
:raises: DriverOperationError or its derivative in case
of driver runtime error.
        :returns: The boot mode, one of :mod:`ironic.common.boot_modes` or
None if it is unknown.
"""
system = redfish_utils.get_system(task.node)
return BOOT_MODE_MAP.get(system.boot.get('mode'))
@staticmethod
def _sensor2dict(resource, *fields):
return {field: getattr(resource, field)
for field in fields
if hasattr(resource, field)}
@classmethod
def _get_sensors_fan(cls, chassis):
"""Get fan sensors reading.
:param chassis: Redfish `chassis` object
:returns: returns a dict of sensor data.
"""
sensors = {}
for fan in chassis.thermal.fans:
sensor = cls._sensor2dict(
fan, 'identity', 'max_reading_range',
'min_reading_range', 'reading', 'reading_units',
'serial_number', 'physical_context')
sensor.update(cls._sensor2dict(fan.status, 'state', 'health'))
unique_name = '%s@%s' % (fan.identity, chassis.identity)
sensors[unique_name] = sensor
return sensors
@classmethod
def _get_sensors_temperatures(cls, chassis):
"""Get temperature sensors reading.
:param chassis: Redfish `chassis` object
:returns: returns a dict of sensor data.
"""
sensors = {}
for temps in chassis.thermal.temperatures:
sensor = cls._sensor2dict(
temps, 'identity', 'max_reading_range_temp',
'min_reading_range_temp', 'reading_celsius',
'physical_context', 'sensor_number')
sensor.update(cls._sensor2dict(temps.status, 'state', 'health'))
unique_name = '%s@%s' % (temps.identity, chassis.identity)
sensors[unique_name] = sensor
return sensors
@classmethod
def _get_sensors_power(cls, chassis):
"""Get power supply sensors reading.
:param chassis: Redfish `chassis` object
:returns: returns a dict of sensor data.
"""
sensors = {}
for power in chassis.power.power_supplies:
sensor = cls._sensor2dict(
power, 'power_capacity_watts',
'line_input_voltage', 'last_power_output_watts',
'serial_number')
sensor.update(cls._sensor2dict(power.status, 'state', 'health'))
sensor.update(cls._sensor2dict(
power.input_ranges, 'minimum_voltage',
'maximum_voltage', 'minimum_frequency_hz',
'maximum_frequency_hz', 'output_wattage'))
unique_name = '%s:%s@%s' % (
power.identity, chassis.power.identity,
chassis.identity)
sensors[unique_name] = sensor
return sensors
@classmethod
def _get_sensors_drive(cls, system):
"""Get storage drive sensors reading.
:param chassis: Redfish `system` object
:returns: returns a dict of sensor data.
"""
sensors = {}
for storage in system.simple_storage.get_members():
for drive in storage.devices:
sensor = cls._sensor2dict(
drive, 'name', 'model', 'capacity_bytes')
sensor.update(
cls._sensor2dict(drive.status, 'state', 'health'))
unique_name = '%s:%s@%s' % (
drive.name, storage.identity, system.identity)
sensors[unique_name] = sensor
return sensors
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:raises: InvalidParameterValue if required parameters
are missing.
:raises: MissingParameterValue if a required parameter is missing.
:returns: returns a dict of sensor data grouped by sensor type.
"""
node = task.node
sensors = collections.defaultdict(dict)
system = redfish_utils.get_system(node)
for chassis in system.chassis:
try:
sensors['Fan'].update(self._get_sensors_fan(chassis))
except sushy.exceptions.SushyError as exc:
LOG.debug("Failed reading fan information for node "
"%(node)s: %(error)s", {'node': node.uuid,
'error': exc})
try:
sensors['Temperature'].update(
self._get_sensors_temperatures(chassis))
except sushy.exceptions.SushyError as exc:
LOG.debug("Failed reading temperature information for node "
"%(node)s: %(error)s", {'node': node.uuid,
'error': exc})
try:
sensors['Power'].update(self._get_sensors_power(chassis))
except sushy.exceptions.SushyError as exc:
LOG.debug("Failed reading power information for node "
"%(node)s: %(error)s", {'node': node.uuid,
'error': exc})
try:
sensors['Drive'].update(self._get_sensors_drive(system))
except sushy.exceptions.SushyError as exc:
LOG.debug("Failed reading drive information for node "
"%(node)s: %(error)s", {'node': node.uuid,
'error': exc})
LOG.debug("Gathered sensor data: %(sensors)s", {'sensors': sensors})
return sensors
@task_manager.require_exclusive_lock
def inject_nmi(self, task):
"""Inject NMI, Non Maskable Interrupt.
Inject NMI (Non Maskable Interrupt) for a node immediately.
:param task: A TaskManager instance containing the node to act on.
:raises: InvalidParameterValue on malformed parameter(s)
:raises: MissingParameterValue on missing parameter(s)
:raises: RedfishConnectionError when it fails to connect to Redfish
:raises: RedfishError on an error from the Sushy library
"""
system = redfish_utils.get_system(task.node)
try:
system.reset_system(sushy.RESET_NMI)
except sushy.exceptions.SushyError as e:
error_msg = (_('Redfish inject NMI failed for node %(node)s. '
'Error: %(error)s') % {'node': task.node.uuid,
'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
def get_supported_indicators(self, task, component=None):
"""Get a map of the supported indicators (e.g. LEDs).
:param task: A task from TaskManager.
:param component: If not `None`, return indicator information
for just this component, otherwise return indicators for
all existing components.
:returns: A dictionary of hardware components
(:mod:`ironic.common.components`) as keys with values
being dictionaries having indicator IDs as keys and indicator
properties as values.
::
{
'chassis': {
'enclosure-0': {
"readonly": true,
"states": [
"OFF",
"ON"
]
}
},
'system':
'blade-A': {
"readonly": true,
"states": [
"OFF",
"ON"
]
}
},
'drive':
'ssd0': {
"readonly": true,
"states": [
"OFF",
"ON"
]
}
}
}
"""
properties = {
"readonly": False,
"states": [
indicator_states.BLINKING,
indicator_states.OFF,
indicator_states.ON
]
}
indicators = {}
system = redfish_utils.get_system(task.node)
try:
if component in (None, components.CHASSIS) and system.chassis:
indicators[components.CHASSIS] = {
chassis.uuid: properties for chassis in system.chassis
if chassis.indicator_led
}
except sushy.exceptions.SushyError as e:
LOG.debug('Chassis indicator not available for node %(node)s: '
'%(error)s', {'node': task.node.uuid, 'error': e})
try:
if component in (None, components.SYSTEM) and system.indicator_led:
indicators[components.SYSTEM] = {
system.uuid: properties
}
except sushy.exceptions.SushyError as e:
LOG.debug('System indicator not available for node %(node)s: '
'%(error)s', {'node': task.node.uuid, 'error': e})
try:
if (component in (None, components.DISK)
and system.simple_storage
and system.simple_storage.drives):
indicators[components.DISK] = {
drive.uuid: properties
for drive in system.simple_storage.drives
if drive.indicator_led
}
except sushy.exceptions.SushyError as e:
LOG.debug('Drive indicator not available for node %(node)s: '
'%(error)s', {'node': task.node.uuid, 'error': e})
return indicators
def set_indicator_state(self, task, component, indicator, state):
"""Set indicator on the hardware component to the desired state.
:param task: A task from TaskManager.
:param component: The hardware component, one of
:mod:`ironic.common.components`.
:param indicator: Indicator ID (as reported by
`get_supported_indicators`).
:param state: Desired state of the indicator, one of
:mod:`ironic.common.indicator_states`.
:raises: InvalidParameterValue if an invalid component, indicator
or state is specified.
:raises: MissingParameterValue if a required parameter is missing
:raises: RedfishError on an error from the Sushy library
"""
system = redfish_utils.get_system(task.node)
try:
if (component == components.SYSTEM
and indicator == system.uuid):
system.set_indicator_led(INDICATOR_MAP_REV[state])
return
elif (component == components.CHASSIS
and system.chassis):
for chassis in system.chassis:
if chassis.uuid == indicator:
chassis.set_indicator_led(
INDICATOR_MAP_REV[state])
return
elif (component == components.DISK
and system.simple_storage
and system.simple_storage.drives):
for drive in system.simple_storage.drives:
if drive.uuid == indicator:
drive.set_indicator_led(
INDICATOR_MAP_REV[state])
return
except sushy.exceptions.SushyError as e:
error_msg = (_('Redfish set %(component)s indicator %(indicator)s '
'state %(state)s failed for node %(node)s. Error: '
'%(error)s') % {'component': component,
'indicator': indicator,
'state': state,
'node': task.node.uuid,
'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
raise exception.MissingParameterValue(_(
"Unknown indicator %(indicator)s for component %(component)s of "
"node %(uuid)s") % {'indicator': indicator,
'component': component,
'uuid': task.node.uuid})
def get_indicator_state(self, task, component, indicator):
"""Get current state of the indicator of the hardware component.
:param task: A task from TaskManager.
:param component: The hardware component, one of
:mod:`ironic.common.components`.
:param indicator: Indicator ID (as reported by
`get_supported_indicators`).
:raises: MissingParameterValue if a required parameter is missing
:raises: RedfishError on an error from the Sushy library
:returns: Current state of the indicator, one of
:mod:`ironic.common.indicator_states`.
"""
system = redfish_utils.get_system(task.node)
try:
if (component == components.SYSTEM
and indicator == system.uuid):
return INDICATOR_MAP[system.indicator_led]
if (component == components.CHASSIS
and system.chassis):
for chassis in system.chassis:
if chassis.uuid == indicator:
return INDICATOR_MAP[chassis.indicator_led]
if (component == components.DISK
and system.simple_storage
and system.simple_storage.drives):
for drive in system.simple_storage.drives:
if drive.uuid == indicator:
return INDICATOR_MAP[drive.indicator_led]
except sushy.exceptions.SushyError as e:
error_msg = (_('Redfish get %(component)s indicator %(indicator)s '
'state failed for node %(node)s. Error: '
'%(error)s') % {'component': component,
'indicator': indicator,
'node': task.node.uuid,
'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
raise exception.MissingParameterValue(_(
"Unknown indicator %(indicator)s for component %(component)s of "
"node %(uuid)s") % {'indicator': indicator,
'component': component,
'uuid': task.node.uuid})
| 39.043624 | 79 | 0.5685 |
7f712ab4907aa60851e759e54928778cac57e063 | 2,882 | py | Python | unittest/python/bindings_geometry_object.py | yDMhaven/pinocchio | fabed17d5ad0dc1c8d251c64cfa656a0215469a5 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-04-07T07:23:34.000Z | 2020-04-07T07:23:34.000Z | unittest/python/bindings_geometry_object.py | yDMhaven/pinocchio | fabed17d5ad0dc1c8d251c64cfa656a0215469a5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | unittest/python/bindings_geometry_object.py | yDMhaven/pinocchio | fabed17d5ad0dc1c8d251c64cfa656a0215469a5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import unittest
import pinocchio as pin
pin.switchToNumpyMatrix()
import numpy as np
@unittest.skipUnless(pin.WITH_HPP_FCL,"Needs HPP-FCL")
class TestGeometryObjectBindings(unittest.TestCase):
def setUp(self):
self.model = pin.buildSampleModelHumanoid()
self.collision_model = pin.buildSampleGeometryModelHumanoid(self.model)
def test_name_get_set(self):
col = self.collision_model.geometryObjects[0]
self.assertTrue(col.name == 'rleg_shoulder_object')
col.name = 'new_collision_name'
self.assertTrue(col.name == 'new_collision_name')
def test_parent_get_set(self):
col = self.collision_model.geometryObjects[0]
self.assertTrue(col.parentJoint == 2)
col.parentJoint = 3
self.assertTrue(col.parentJoint == 3)
def test_placement_get_set(self):
m = pin.SE3.Identity()
new_m = pin.SE3.Random()
col = self.collision_model.geometryObjects[0]
self.assertTrue(np.allclose(col.placement.homogeneous,m.homogeneous))
col.placement = new_m
self.assertTrue(np.allclose(col.placement.homogeneous , new_m.homogeneous))
def test_meshpath_get(self):
col = self.collision_model.geometryObjects[0]
self.assertTrue(col.meshPath is not None)
def test_scale(self):
scale = np.array([1.,2.,3.])
pin.setGeometryMeshScales(self.collision_model,scale)
for obj in self.collision_model.geometryObjects:
self.assertTrue(obj.meshScale[0] == scale[0])
self.assertTrue(obj.meshScale[1] == scale[1])
self.assertTrue(obj.meshScale[2] == scale[2])
def test_scalar_scale(self):
scale = 2.
vec = np.array([scale]*3)
pin.setGeometryMeshScales(self.collision_model,scale)
for obj in self.collision_model.geometryObjects:
self.assertTrue(np.allclose(obj.meshScale, vec))
def test_create_data(self):
collision_data = self.collision_model.createData()
self.assertEqual(len(collision_data.oMg), self.collision_model.ngeoms)
def test_create_datas(self):
collision_data = self.collision_model.createData()
self.assertEqual(len(collision_data.oMg), self.collision_model.ngeoms)
data_2, collision_data_2 = pin.createDatas(self.model, self.collision_model)
self.assertTrue(self.model.check(data_2))
self.assertEqual(len(collision_data_2.oMg), self.collision_model.ngeoms)
def test_copy(self):
collision_model_copy = self.collision_model.copy()
self.assertEqual(self.collision_model.ngeoms,collision_model_copy.ngeoms)
collision_data = self.collision_model.createData()
collision_data_copy = collision_data.copy()
self.assertEqual(len(collision_data.oMg),len(collision_data_copy.oMg))
if __name__ == '__main__':
unittest.main()
| 38.426667 | 84 | 0.699167 |
6bda0d3827a53cafe56caf83facb9538a0fe5497 | 16,124 | py | Python | tests/safety/common.py | HyreAS/panda | 1a635b2d66f2b79b6305ac80e9ba31085347077d | [
"MIT"
] | null | null | null | tests/safety/common.py | HyreAS/panda | 1a635b2d66f2b79b6305ac80e9ba31085347077d | [
"MIT"
] | null | null | null | tests/safety/common.py | HyreAS/panda | 1a635b2d66f2b79b6305ac80e9ba31085347077d | [
"MIT"
] | null | null | null | import os
import abc
import struct
import unittest
import importlib
import numpy as np
from typing import Optional, List, Dict
from opendbc.can.packer import CANPacker # pylint: disable=import-error
from panda.tests.safety import libpandasafety_py
MAX_WRONG_COUNTERS = 5
class UNSAFE_MODE:
DEFAULT = 0
DISABLE_DISENGAGE_ON_GAS = 1
DISABLE_STOCK_AEB = 2
RAISE_LONGITUDINAL_LIMITS_TO_ISO_MAX = 8
def package_can_msg(msg):
addr, _, dat, bus = msg
rdlr, rdhr = struct.unpack('II', dat.ljust(8, b'\x00'))
ret = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
if addr >= 0x800:
ret[0].RIR = (addr << 3) | 5
else:
ret[0].RIR = (addr << 21) | 1
ret[0].RDTR = len(dat) | ((bus & 0xF) << 4)
ret[0].RDHR = rdhr
ret[0].RDLR = rdlr
return ret
def make_msg(bus, addr, length=8):
return package_can_msg([addr, 0, b'\x00' * length, bus])
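# Illustrative sketch (not used by the tests): package_can_msg() above mirrors the STM32 CAN
# mailbox layout: standard 11-bit addresses land in RIR bits 21+, addresses >= 0x800 use the
# extended-ID encoding, and RDLR/RDHR carry the low/high four payload bytes. The address and
# payload below are arbitrary.
def _example_packaged_msg():
  msg = package_can_msg([0x123, 0, b'\x01\x02\x03\x04\x05\x06\x07\x08', 0])
  assert (msg[0].RIR >> 21) == 0x123      # standard addressing
  assert (msg[0].RDTR & 0xF) == 8         # DLC equals the payload length
  assert ((msg[0].RDTR >> 4) & 0xF) == 0  # bus 0
  return msg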
class CANPackerPanda(CANPacker):
def make_can_msg_panda(self, name_or_addr, bus, values, counter=-1, fix_checksum=None):
    msg = self.make_can_msg(name_or_addr, bus, values, counter=counter)
if fix_checksum is not None:
msg = fix_checksum(msg)
return package_can_msg(msg)
class PandaSafetyTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
if cls.__name__ == "PandaSafetyTestBase":
cls.safety = None
raise unittest.SkipTest
def _rx(self, msg):
return self.safety.safety_rx_hook(msg)
def _tx(self, msg):
return self.safety.safety_tx_hook(msg)
class InterceptorSafetyTest(PandaSafetyTestBase):
INTERCEPTOR_THRESHOLD = 0
@classmethod
def setUpClass(cls):
if cls.__name__ == "InterceptorSafetyTest":
cls.safety = None
raise unittest.SkipTest
@abc.abstractmethod
def _interceptor_msg(self, gas, addr):
pass
def test_prev_gas_interceptor(self):
self._rx(self._interceptor_msg(0x0, 0x201))
self.assertFalse(self.safety.get_gas_interceptor_prev())
self._rx(self._interceptor_msg(0x1000, 0x201))
self.assertTrue(self.safety.get_gas_interceptor_prev())
self._rx(self._interceptor_msg(0x0, 0x201))
self.safety.set_gas_interceptor_detected(False)
def test_disengage_on_gas_interceptor(self):
for g in range(0, 0x1000):
self._rx(self._interceptor_msg(0, 0x201))
self.safety.set_controls_allowed(True)
self._rx(self._interceptor_msg(g, 0x201))
remain_enabled = g <= self.INTERCEPTOR_THRESHOLD
self.assertEqual(remain_enabled, self.safety.get_controls_allowed())
self._rx(self._interceptor_msg(0, 0x201))
self.safety.set_gas_interceptor_detected(False)
def test_unsafe_mode_no_disengage_on_gas_interceptor(self):
self.safety.set_controls_allowed(True)
self.safety.set_unsafe_mode(UNSAFE_MODE.DISABLE_DISENGAGE_ON_GAS)
for g in range(0, 0x1000):
self._rx(self._interceptor_msg(g, 0x201))
self.assertTrue(self.safety.get_controls_allowed())
self._rx(self._interceptor_msg(0, 0x201))
self.safety.set_gas_interceptor_detected(False)
self.safety.set_unsafe_mode(UNSAFE_MODE.DEFAULT)
def test_allow_engage_with_gas_interceptor_pressed(self):
self._rx(self._interceptor_msg(0x1000, 0x201))
self.safety.set_controls_allowed(1)
self._rx(self._interceptor_msg(0x1000, 0x201))
self.assertTrue(self.safety.get_controls_allowed())
self._rx(self._interceptor_msg(0, 0x201))
def test_gas_interceptor_safety_check(self):
for gas in np.arange(0, 4000, 100):
for controls_allowed in [True, False]:
self.safety.set_controls_allowed(controls_allowed)
if controls_allowed:
send = True
else:
send = gas == 0
self.assertEqual(send, self._tx(self._interceptor_msg(gas, 0x200)))
class TorqueSteeringSafetyTest(PandaSafetyTestBase):
MAX_RATE_UP = 0
MAX_RATE_DOWN = 0
MAX_TORQUE = 0
MAX_RT_DELTA = 0
RT_INTERVAL = 0
MAX_TORQUE_ERROR = 0
TORQUE_MEAS_TOLERANCE = 0
@classmethod
def setUpClass(cls):
if cls.__name__ == "TorqueSteeringSafetyTest":
cls.safety = None
raise unittest.SkipTest
@abc.abstractmethod
def _torque_meas_msg(self, torque):
pass
@abc.abstractmethod
def _torque_msg(self, torque):
pass
def _set_prev_torque(self, t):
self.safety.set_desired_torque_last(t)
self.safety.set_rt_torque_last(t)
self.safety.set_torque_meas(t, t)
def test_steer_safety_check(self):
for enabled in [0, 1]:
for t in range(-self.MAX_TORQUE * 2, self.MAX_TORQUE * 2):
self.safety.set_controls_allowed(enabled)
self._set_prev_torque(t)
if abs(t) > self.MAX_TORQUE or (not enabled and abs(t) > 0):
self.assertFalse(self._tx(self._torque_msg(t)))
else:
self.assertTrue(self._tx(self._torque_msg(t)))
def test_torque_absolute_limits(self):
for controls_allowed in [True, False]:
for torque in np.arange(-self.MAX_TORQUE - 1000, self.MAX_TORQUE + 1000, self.MAX_RATE_UP):
self.safety.set_controls_allowed(controls_allowed)
self.safety.set_rt_torque_last(torque)
self.safety.set_torque_meas(torque, torque)
self.safety.set_desired_torque_last(torque - self.MAX_RATE_UP)
if controls_allowed:
send = (-self.MAX_TORQUE <= torque <= self.MAX_TORQUE)
else:
send = torque == 0
self.assertEqual(send, self._tx(self._torque_msg(torque)))
def test_non_realtime_limit_up(self):
self.safety.set_controls_allowed(True)
self._set_prev_torque(0)
self.assertTrue(self._tx(self._torque_msg(self.MAX_RATE_UP)))
self._set_prev_torque(0)
self.assertFalse(self._tx(self._torque_msg(self.MAX_RATE_UP + 1)))
def test_non_realtime_limit_down(self):
self.safety.set_controls_allowed(True)
torque_meas = self.MAX_TORQUE - self.MAX_TORQUE_ERROR - 50
self.safety.set_rt_torque_last(self.MAX_TORQUE)
self.safety.set_torque_meas(torque_meas, torque_meas)
self.safety.set_desired_torque_last(self.MAX_TORQUE)
self.assertTrue(self._tx(self._torque_msg(self.MAX_TORQUE - self.MAX_RATE_DOWN)))
self.safety.set_rt_torque_last(self.MAX_TORQUE)
self.safety.set_torque_meas(torque_meas, torque_meas)
self.safety.set_desired_torque_last(self.MAX_TORQUE)
self.assertFalse(self._tx(self._torque_msg(self.MAX_TORQUE - self.MAX_RATE_DOWN + 1)))
def test_exceed_torque_sensor(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
self._set_prev_torque(0)
for t in np.arange(0, self.MAX_TORQUE_ERROR + 2, 2): # step needs to be smaller than MAX_TORQUE_ERROR
t *= sign
self.assertTrue(self._tx(self._torque_msg(t)))
self.assertFalse(self._tx(self._torque_msg(sign * (self.MAX_TORQUE_ERROR + 2))))
def test_realtime_limit_up(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
self.safety.init_tests()
self._set_prev_torque(0)
for t in np.arange(0, self.MAX_RT_DELTA + 1, 1):
t *= sign
self.safety.set_torque_meas(t, t)
self.assertTrue(self._tx(self._torque_msg(t)))
self.assertFalse(self._tx(self._torque_msg(sign * (self.MAX_RT_DELTA + 1))))
self._set_prev_torque(0)
for t in np.arange(0, self.MAX_RT_DELTA + 1, 1):
t *= sign
self.safety.set_torque_meas(t, t)
self.assertTrue(self._tx(self._torque_msg(t)))
# Increase timer to update rt_torque_last
self.safety.set_timer(self.RT_INTERVAL + 1)
self.assertTrue(self._tx(self._torque_msg(sign * self.MAX_RT_DELTA)))
self.assertTrue(self._tx(self._torque_msg(sign * (self.MAX_RT_DELTA + 1))))
def test_torque_measurements(self):
trq = 50
for t in [trq, -trq, 0, 0, 0, 0]:
self._rx(self._torque_meas_msg(t))
max_range = range(trq, trq + self.TORQUE_MEAS_TOLERANCE + 1)
min_range = range(-(trq + self.TORQUE_MEAS_TOLERANCE), -trq + 1)
self.assertTrue(self.safety.get_torque_meas_min() in min_range)
self.assertTrue(self.safety.get_torque_meas_max() in max_range)
max_range = range(0, self.TORQUE_MEAS_TOLERANCE + 1)
min_range = range(-(trq + self.TORQUE_MEAS_TOLERANCE), -trq + 1)
self._rx(self._torque_meas_msg(0))
self.assertTrue(self.safety.get_torque_meas_min() in min_range)
self.assertTrue(self.safety.get_torque_meas_max() in max_range)
max_range = range(0, self.TORQUE_MEAS_TOLERANCE + 1)
min_range = range(-self.TORQUE_MEAS_TOLERANCE, 0 + 1)
self._rx(self._torque_meas_msg(0))
self.assertTrue(self.safety.get_torque_meas_min() in min_range)
self.assertTrue(self.safety.get_torque_meas_max() in max_range)
class PandaSafetyTest(PandaSafetyTestBase):
TX_MSGS: Optional[List[List[int]]] = None
STANDSTILL_THRESHOLD: Optional[float] = None
GAS_PRESSED_THRESHOLD = 0
RELAY_MALFUNCTION_ADDR: Optional[int] = None
RELAY_MALFUNCTION_BUS: Optional[int] = None
FWD_BLACKLISTED_ADDRS: Dict[int, List[int]] = {} # {bus: [addr]}
FWD_BUS_LOOKUP: Dict[int, int] = {}
@classmethod
def setUpClass(cls):
if cls.__name__ == "PandaSafetyTest":
cls.safety = None
raise unittest.SkipTest
@abc.abstractmethod
def _brake_msg(self, brake):
pass
@abc.abstractmethod
def _speed_msg(self, speed):
pass
@abc.abstractmethod
def _gas_msg(self, gas):
pass
@abc.abstractmethod
def _pcm_status_msg(self, enable):
pass
# ***** standard tests for all safety modes *****
def test_relay_malfunction(self):
# each car has an addr that is used to detect relay malfunction
# if that addr is seen on specified bus, triggers the relay malfunction
# protection logic: both tx_hook and fwd_hook are expected to return failure
self.assertFalse(self.safety.get_relay_malfunction())
self._rx(make_msg(self.RELAY_MALFUNCTION_BUS, self.RELAY_MALFUNCTION_ADDR, 8))
self.assertTrue(self.safety.get_relay_malfunction())
for a in range(1, 0x800):
for b in range(0, 3):
self.assertEqual(-1, self._tx(make_msg(b, a, 8)))
self.assertEqual(-1, self.safety.safety_fwd_hook(b, make_msg(b, a, 8)))
def test_fwd_hook(self):
# some safety modes don't forward anything, while others blacklist msgs
for bus in range(0x0, 0x3):
for addr in range(0x1, 0x800):
# assume len 8
msg = make_msg(bus, addr, 8)
fwd_bus = self.FWD_BUS_LOOKUP.get(bus, -1)
if bus in self.FWD_BLACKLISTED_ADDRS and addr in self.FWD_BLACKLISTED_ADDRS[bus]:
fwd_bus = -1
self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(bus, msg))
def test_spam_can_buses(self):
for addr in range(1, 0x800):
for bus in range(0, 4):
if all(addr != m[0] or bus != m[1] for m in self.TX_MSGS):
self.assertFalse(self._tx(make_msg(bus, addr, 8)))
def test_default_controls_not_allowed(self):
self.assertFalse(self.safety.get_controls_allowed())
def test_manually_enable_controls_allowed(self):
self.safety.set_controls_allowed(1)
self.assertTrue(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(0)
self.assertFalse(self.safety.get_controls_allowed())
def test_prev_gas(self):
self.assertFalse(self.safety.get_gas_pressed_prev())
for pressed in [self.GAS_PRESSED_THRESHOLD + 1, 0]:
self._rx(self._gas_msg(pressed))
self.assertEqual(bool(pressed), self.safety.get_gas_pressed_prev())
def test_allow_engage_with_gas_pressed(self):
self._rx(self._gas_msg(1))
self.safety.set_controls_allowed(True)
self._rx(self._gas_msg(1))
self.assertTrue(self.safety.get_controls_allowed())
self._rx(self._gas_msg(1))
self.assertTrue(self.safety.get_controls_allowed())
def test_disengage_on_gas(self):
self._rx(self._gas_msg(0))
self.safety.set_controls_allowed(True)
self._rx(self._gas_msg(self.GAS_PRESSED_THRESHOLD + 1))
self.assertFalse(self.safety.get_controls_allowed())
def test_unsafe_mode_no_disengage_on_gas(self):
self._rx(self._gas_msg(0))
self.safety.set_controls_allowed(True)
self.safety.set_unsafe_mode(UNSAFE_MODE.DISABLE_DISENGAGE_ON_GAS)
self._rx(self._gas_msg(self.GAS_PRESSED_THRESHOLD + 1))
self.assertTrue(self.safety.get_controls_allowed())
def test_prev_brake(self):
self.assertFalse(self.safety.get_brake_pressed_prev())
for pressed in [True, False]:
self._rx(self._brake_msg(not pressed))
self.assertEqual(not pressed, self.safety.get_brake_pressed_prev())
self._rx(self._brake_msg(pressed))
self.assertEqual(pressed, self.safety.get_brake_pressed_prev())
def test_enable_control_allowed_from_cruise(self):
self._rx(self._pcm_status_msg(False))
self.assertFalse(self.safety.get_controls_allowed())
self._rx(self._pcm_status_msg(True))
self.assertTrue(self.safety.get_controls_allowed())
def test_disable_control_allowed_from_cruise(self):
self.safety.set_controls_allowed(1)
self._rx(self._pcm_status_msg(False))
self.assertFalse(self.safety.get_controls_allowed())
def test_cruise_engaged_prev(self):
for engaged in [True, False]:
self._rx(self._pcm_status_msg(engaged))
self.assertEqual(engaged, self.safety.get_cruise_engaged_prev())
self._rx(self._pcm_status_msg(not engaged))
self.assertEqual(not engaged, self.safety.get_cruise_engaged_prev())
def test_allow_brake_at_zero_speed(self):
# Brake was already pressed
self._rx(self._speed_msg(0))
self._rx(self._brake_msg(1))
self.safety.set_controls_allowed(1)
self._rx(self._brake_msg(1))
self.assertTrue(self.safety.get_controls_allowed())
self._rx(self._brake_msg(0))
self.assertTrue(self.safety.get_controls_allowed())
# rising edge of brake should disengage
self._rx(self._brake_msg(1))
self.assertFalse(self.safety.get_controls_allowed())
self._rx(self._brake_msg(0)) # reset no brakes
def test_not_allow_brake_when_moving(self):
# Brake was already pressed
self._rx(self._brake_msg(1))
self.safety.set_controls_allowed(1)
self._rx(self._speed_msg(self.STANDSTILL_THRESHOLD))
self._rx(self._brake_msg(1))
self.assertTrue(self.safety.get_controls_allowed())
self._rx(self._speed_msg(self.STANDSTILL_THRESHOLD + 1))
self._rx(self._brake_msg(1))
self.assertFalse(self.safety.get_controls_allowed())
self._rx(self._speed_msg(0))
def test_sample_speed(self):
self.assertFalse(self.safety.get_vehicle_moving())
# not moving
self.safety.safety_rx_hook(self._speed_msg(0))
self.assertFalse(self.safety.get_vehicle_moving())
# speed is at threshold
self.safety.safety_rx_hook(self._speed_msg(self.STANDSTILL_THRESHOLD))
self.assertFalse(self.safety.get_vehicle_moving())
# past threshold
self.safety.safety_rx_hook(self._speed_msg(self.STANDSTILL_THRESHOLD + 1))
self.assertTrue(self.safety.get_vehicle_moving())
def test_tx_hook_on_wrong_safety_mode(self):
files = os.listdir(os.path.dirname(os.path.realpath(__file__)))
test_files = [f for f in files if f.startswith("test_") and f.endswith(".py")]
current_test = self.__class__.__name__
all_tx = []
for tf in test_files:
test = importlib.import_module("panda.tests.safety."+tf[:-3])
for attr in dir(test):
if attr.startswith("Test") and attr != current_test:
tx = getattr(getattr(test, attr), "TX_MSGS")
if tx is not None:
# TODO: Temporary, should be fixed in panda firmware, safety_honda.h
if attr in ['TestHondaBoschLongGiraffeSafety', 'TestHondaNidecSafety']:
tx = list(filter(lambda m: m[0] not in [0x1FA, 0x30C], tx))
all_tx.append(tx)
# make sure we got all the msgs
self.assertTrue(len(all_tx) >= len(test_files)-1)
for tx_msgs in all_tx:
for addr, bus in tx_msgs:
msg = make_msg(bus, addr)
self.safety.set_controls_allowed(1)
# TODO: this should be blocked
if current_test in ["TestNissanSafety", "TestNissanLeafSafety"] and [addr, bus] in self.TX_MSGS:
continue
self.assertFalse(self._tx(msg), f"{addr=} {bus=} got through")
| 36.152466 | 108 | 0.71651 |
6dcc997b4c7327471116aa56a186ebdd482c8c16 | 9,638 | py | Python | Lib/pipes.py | arvindm95/unladen-swallow | 8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3 | [
"PSF-2.0"
] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | python/src/Lib/pipes.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 315 | 2015-05-31T11:55:46.000Z | 2022-01-12T08:36:37.000Z | python/src/Lib/pipes.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z | """Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
For an example, see the function test() at the end of the file.
""" # '
import re
import os
import tempfile
import string
__all__ = ["Template"]
# Conversion step kinds
FILEIN_FILEOUT = 'ff' # Must read & write real files
STDIN_FILEOUT = '-f' # Must write a real file
FILEIN_STDOUT = 'f-' # Must read a real file
STDIN_STDOUT = '--' # Normal pipeline element
SOURCE = '.-' # Must be first, writes stdout
SINK = '-.' # Must be last, reads stdin
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
SOURCE, SINK]
class Template:
"""Class representing a pipeline template."""
def __init__(self):
"""Template() returns a fresh pipeline template."""
self.debugging = 0
self.reset()
def __repr__(self):
"""t.__repr__() implements repr(t)."""
return '<Template instance, steps=%r>' % (self.steps,)
def reset(self):
"""t.reset() restores a pipeline template to its initial state."""
self.steps = []
def clone(self):
"""t.clone() returns a new pipeline template with identical
initial state as the current one."""
t = Template()
t.steps = self.steps[:]
t.debugging = self.debugging
return t
def debug(self, flag):
"""t.debug(flag) turns debugging on or off."""
self.debugging = flag
def append(self, cmd, kind):
"""t.append(cmd, kind) adds a new step at the end."""
if type(cmd) is not type(''):
raise TypeError, \
'Template.append: cmd must be a string'
if kind not in stepkinds:
raise ValueError, \
'Template.append: bad kind %r' % (kind,)
if kind == SOURCE:
raise ValueError, \
'Template.append: SOURCE can only be prepended'
if self.steps and self.steps[-1][1] == SINK:
raise ValueError, \
'Template.append: already ends with SINK'
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
raise ValueError, \
'Template.append: missing $IN in cmd'
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
raise ValueError, \
'Template.append: missing $OUT in cmd'
self.steps.append((cmd, kind))
def prepend(self, cmd, kind):
"""t.prepend(cmd, kind) adds a new step at the front."""
if type(cmd) is not type(''):
raise TypeError, \
'Template.prepend: cmd must be a string'
if kind not in stepkinds:
raise ValueError, \
'Template.prepend: bad kind %r' % (kind,)
if kind == SINK:
raise ValueError, \
'Template.prepend: SINK can only be appended'
if self.steps and self.steps[0][1] == SOURCE:
raise ValueError, \
'Template.prepend: already begins with SOURCE'
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
raise ValueError, \
'Template.prepend: missing $IN in cmd'
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
raise ValueError, \
'Template.prepend: missing $OUT in cmd'
self.steps.insert(0, (cmd, kind))
def open(self, file, rw):
"""t.open(file, rw) returns a pipe or file object open for
reading or writing; the file is the other end of the pipeline."""
if rw == 'r':
return self.open_r(file)
if rw == 'w':
return self.open_w(file)
raise ValueError, \
'Template.open: rw must be \'r\' or \'w\', not %r' % (rw,)
def open_r(self, file):
"""t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively."""
if not self.steps:
return open(file, 'r')
if self.steps[-1][1] == SINK:
raise ValueError, \
                  'Template.open_r: pipeline ends with SINK'
cmd = self.makepipeline(file, '')
return os.popen(cmd, 'r')
def open_w(self, file):
if not self.steps:
return open(file, 'w')
if self.steps[0][1] == SOURCE:
raise ValueError, \
'Template.open_w: pipeline begins with SOURCE'
cmd = self.makepipeline('', file)
return os.popen(cmd, 'w')
def copy(self, infile, outfile):
return os.system(self.makepipeline(infile, outfile))
def makepipeline(self, infile, outfile):
cmd = makepipeline(infile, self.steps, outfile)
if self.debugging:
print cmd
cmd = 'set -x; ' + cmd
return cmd
def makepipeline(infile, steps, outfile):
# Build a list with for each command:
# [input filename or '', command string, kind, output filename or '']
list = []
for cmd, kind in steps:
list.append(['', cmd, kind, ''])
#
# Make sure there is at least one step
#
if not list:
list.append(['', 'cat', '--', ''])
#
# Take care of the input and output ends
#
[cmd, kind] = list[0][1:3]
if kind[0] == 'f' and not infile:
list.insert(0, ['', 'cat', '--', ''])
list[0][0] = infile
#
[cmd, kind] = list[-1][1:3]
if kind[1] == 'f' and not outfile:
list.append(['', 'cat', '--', ''])
list[-1][-1] = outfile
#
# Invent temporary files to connect stages that need files
#
garbage = []
for i in range(1, len(list)):
lkind = list[i-1][2]
rkind = list[i][2]
if lkind[1] == 'f' or rkind[0] == 'f':
(fd, temp) = tempfile.mkstemp()
os.close(fd)
garbage.append(temp)
list[i-1][-1] = list[i][0] = temp
#
for item in list:
[inf, cmd, kind, outf] = item
if kind[1] == 'f':
cmd = 'OUT=' + quote(outf) + '; ' + cmd
if kind[0] == 'f':
cmd = 'IN=' + quote(inf) + '; ' + cmd
if kind[0] == '-' and inf:
cmd = cmd + ' <' + quote(inf)
if kind[1] == '-' and outf:
cmd = cmd + ' >' + quote(outf)
item[1] = cmd
#
cmdlist = list[0][1]
for item in list[1:]:
[cmd, kind] = item[1:3]
if item[0] == '':
if 'f' in kind:
cmd = '{ ' + cmd + '; }'
cmdlist = cmdlist + ' |\n' + cmd
else:
cmdlist = cmdlist + '\n' + cmd
#
if garbage:
rmcmd = 'rm -f'
for file in garbage:
rmcmd = rmcmd + ' ' + quote(file)
trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
#
return cmdlist
# Reliably quote a string as a single argument for /bin/sh
_safechars = string.ascii_letters + string.digits + '!@%_-+=:,./' # Safe unquoted
_funnychars = '"`$\\' # Unsafe inside "double quotes"
def quote(file):
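    # Examples: quote('abc') -> 'abc' (unchanged); quote('a b') -> "'a b'".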
for c in file:
if c not in _safechars:
break
else:
return file
if '\'' not in file:
return '\'' + file + '\''
res = ''
for c in file:
if c in _funnychars:
c = '\\' + c
res = res + c
return '"' + res + '"'
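# Minimal usage sketch (illustrative; not part of the original module): builds a
# one-step pipeline around the POSIX 'tr' utility and writes a file through it.
# The output path below is just a placeholder.
def _example():
    t = Template()
    t.append('tr a-z A-Z', STDIN_STDOUT)
    f = t.open('/tmp/pipes_example.txt', 'w')
    f.write('hello, pipes\n')
    f.close()
    print open('/tmp/pipes_example.txt').read()      # HELLO, PIPES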
| 34.056537 | 81 | 0.56298 |
58fd3e3c8857b8ede1d2d1f1e058e64645f835f3 | 1,609 | py | Python | Software/src/liv/iotConnectors/emoncms/emoncms.py | nadaol/Weather_Station | 5bfb31c2974227fcc8d912e3911f356d4e3fb187 | [
"MIT"
] | null | null | null | Software/src/liv/iotConnectors/emoncms/emoncms.py | nadaol/Weather_Station | 5bfb31c2974227fcc8d912e3911f356d4e3fb187 | [
"MIT"
] | null | null | null | Software/src/liv/iotConnectors/emoncms/emoncms.py | nadaol/Weather_Station | 5bfb31c2974227fcc8d912e3911f356d4e3fb187 | [
"MIT"
] | null | null | null | import httplib, urllib
import httplib2 as http
import datetime
import json
from urlparse import urlparse
import time
if __name__ == "__main__":
apiKey = 'YOUR_API_KEY_HERE'
e_conn = httplib.HTTPConnection("emoncms.org:80")
e_headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
target = urlparse('http://localhost:5000/getAllRawSensorData')
method = 'GET'
body = ''
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json; charset=UTF-8'
}
h = http.Http()
while (1):
print 'writing data to emoncms'
response, content = h.request(target.geturl(), method, body, headers)
data = json.loads(content)
print data
now = datetime.datetime.utcnow()
print now
#ts_params = urllib.urlencode({'field1': '100', 'field2': '200', 'field3': '200', 'field4': '300', 'key': apiKey})
#e_params = urllib.urlencode({'field1': data['CO2level'], 'field2': data['Temperature'], 'field3': data['Humidity'], 'field4': data['AirPressure'], 'key': apiKey})
        url = ("http://emoncms.org/input/post.json?json={co2:" + data['CO2level'] +
               ",temperature:" + data['Temperature'] + ",humidity:" + data['Humidity'] +
               ",airPressure:" + data['AirPressure'] + "}&apikey=" + apiKey)
print url
urllib.urlopen(url)
#headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
#e_conn.request("POST", "/api", e_params, e_headers)
#e_response = e_conn.getresponse()
#print e_response.status, e_response.reason
#data = e_response.read()
time.sleep(60)
| 29.796296 | 165 | 0.670603 |
01c5c457033a644aa2bbd2840bef8d1771037484 | 1,215 | py | Python | res/functions.py | RainMaker1707/SAT_in_python | 1d69a652811f598221ed8425223f45669a3cbcc9 | [
"MIT"
] | null | null | null | res/functions.py | RainMaker1707/SAT_in_python | 1d69a652811f598221ed8425223f45669a3cbcc9 | [
"MIT"
] | null | null | null | res/functions.py | RainMaker1707/SAT_in_python | 1d69a652811f598221ed8425223f45669a3cbcc9 | [
"MIT"
] | null | null | null | from res.members import *
from res.candidates import *
from random import randint, getrandbits
def setup(member_nbr: int, candidate_nbr: int):
"""
    This function builds a list of potential candidates and a list of members.
    Each member gets a random list of random demands.
    :complexity: roughly O(member_nbr * candidate_nbr) demand draws in expectation
    :param member_nbr: number of members already registered in the 'club'
    :param candidate_nbr: number of candidates who wish to join the 'club'
    :return: the list of members, each one with its own list of demands
"""
for x in range(candidate_nbr):
Candidate()
for y in range(member_nbr):
this = Member()
for i in range(randint(1, CandidateList.size())):
temp = CandidateList.get()[randint(0, CandidateList.size() - 1)]
accept = bool(getrandbits(1))
            # Skip candidates this member already has a demand about, to avoid
            # duplicate demands or paradoxical ones (accept and refuse the same candidate)
            while ({'candidate': temp, 'accept': True} in this.get_demands()
                   or {'candidate': temp, 'accept': False} in this.get_demands()):
                temp = CandidateList.get()[randint(0, CandidateList.size() - 1)]
this.add_demand(temp, accept)
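# Usage sketch (illustrative, not part of the original project): builds a small random
# instance with 5 candidates and 8 members when the module is run directly.
if __name__ == "__main__":
    setup(member_nbr=8, candidate_nbr=5)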
| 45 | 87 | 0.665021 |
f3231a6bb69219ec625692aa5a99246bcabfc2f2 | 4,530 | py | Python | delta/data/datasets/base_dataset.py | pangge/delta | 212939f9aa67e3971dc75914f75b9aecc40d1261 | [
"Apache-2.0"
] | null | null | null | delta/data/datasets/base_dataset.py | pangge/delta | 212939f9aa67e3971dc75914f75b9aecc40d1261 | [
"Apache-2.0"
] | null | null | null | delta/data/datasets/base_dataset.py | pangge/delta | 212939f9aa67e3971dc75914f75b9aecc40d1261 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data set operation class"""
import os
from typing import List
from absl import logging
from delta import PACKAGE_ROOT_DIR
from delta.utils.config import load_config
from delta.utils.config import save_config
class BaseDataSet:
"""Base Data set class."""
def __init__(self, project_dir: str):
self.project_dir: str = project_dir
# download files, must be under download_dir
self.download_files: List[str] = list()
# final generate files, must be under data_dir
self.data_files: List[str] = list()
# config files, must be under config_dir
self.config_files: List[str] = list()
self.origin_config_dir = os.path.join(PACKAGE_ROOT_DIR, "configs")
def download(self) -> bool:
"""Download dataset from Internet."""
raise NotImplementedError
def after_download(self) -> bool:
"""Dataset operations after download."""
raise NotImplementedError
def copy_config_files(self) -> None:
"""Copy config files"""
for config_file in self.config_files:
full_config_file = os.path.join(self.origin_config_dir, config_file)
new_config_file = os.path.join(self.config_dir, config_file)
config = load_config(full_config_file)
config['data']['project_dir'] = self.project_dir
logging.info(f"Save config from {full_config_file} to {new_config_file}")
save_config(config, new_config_file)
@property
def data_dir(self) -> str:
"""data directory"""
return os.path.join(self.project_dir, "data")
@property
def download_dir(self) -> str:
"""Download directory"""
return os.path.join(self.project_dir, "download")
@property
def config_dir(self) -> str:
"""Config directory"""
return os.path.join(self.project_dir, "config")
def _download_ready(self) -> bool:
"""If download is ready."""
for data_file in self.download_files:
full_data_file = os.path.join(self.data_dir, data_file)
if not os.path.exists(full_data_file):
logging.warning(f"Data: {full_data_file} do not exists!")
return False
return True
def is_ready(self) -> bool:
"""If the dataset is ready for using."""
if not os.path.exists(self.project_dir):
logging.warning(f"Directory: {self.project_dir} do not exists!")
return False
for data_file in self.data_files:
full_data_file = os.path.join(self.data_dir, data_file)
if not os.path.exists(full_data_file):
logging.warning(f"Data file: {full_data_file} do not exists!")
return False
for config_file in self.config_files:
full_config_file = os.path.join(self.config_dir, config_file)
if not os.path.exists(full_config_file):
logging.warning(f"Config file: {full_config_file} do not exists!")
return False
return True
def build(self) -> bool:
"""Build the dataset."""
if self.is_ready():
logging.info("Dataset is ready.")
return True
logging.info('Dataset is not ready!')
if not os.path.exists(self.project_dir):
os.mkdir(self.project_dir)
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
if not os.path.exists(self.config_dir):
os.mkdir(self.config_dir)
if not os.path.exists(self.download_dir):
os.mkdir(self.download_dir)
self.copy_config_files()
if not self._download_ready():
logging.info("Download not ready!")
logging.info("Start downloading ...")
download_res = self.download()
if not download_res:
logging.warning("Download failed.")
return False
logging.info("Start doing after download processing.")
after_res = self.after_download()
if not after_res:
logging.warning("After download process failed.")
return False
logging.info("Dataset is ready.")
return True
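# Minimal subclass sketch (illustrative only, not part of delta): shows how a concrete
# dataset is expected to fill in the file lists and the two abstract hooks. All file
# names below are made-up placeholders.
class _ExampleDataSet(BaseDataSet):
  """Toy dataset illustrating the BaseDataSet contract."""

  def __init__(self, project_dir: str):
    super().__init__(project_dir)
    self.download_files = ["example_corpus.tar.gz"]
    self.data_files = ["train.txt", "dev.txt"]
    self.config_files = ["example_config.yml"]

  def download(self) -> bool:
    # fetch self.download_files into self.download_dir (omitted in this sketch)
    return True

  def after_download(self) -> bool:
    # unpack the archive and write self.data_files under self.data_dir (omitted here)
    return True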
| 34.580153 | 80 | 0.685872 |
5ca55deb68159f11c32774cd61eb0e41961fb9f2 | 3,752 | py | Python | movie/settings.py | mwerumuchai/technical_screening | cc58d977436813bcb7fd1129f8f6bf053a569b15 | [
"MIT"
] | null | null | null | movie/settings.py | mwerumuchai/technical_screening | cc58d977436813bcb7fd1129f8f6bf053a569b15 | [
"MIT"
] | null | null | null | movie/settings.py | mwerumuchai/technical_screening | cc58d977436813bcb7fd1129f8f6bf053a569b15 | [
"MIT"
] | null | null | null | """
Django settings for movie project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG =config('DEBUG',default=False,cast=bool)
ALLOWED_HOSTS = ['*']
# ratings
STAR_RATINGS_RERATE = False
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'moviedb',
'star_ratings',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'movie.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'movie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'movies',
'USER': 'muchai',
'PASSWORD':'Ngea!2017##',
}
}
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
# AUTH_USER_MODEL = 'core.User'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| 25.69863 | 91 | 0.702559 |
99914489c0bf07682f61844e6493fccf95d549c3 | 12,185 | py | Python | rl_coach/environments/starcraft2_environment.py | JohnnyPeng18/coach | 1ee9e10747c535b387a00c946efa220efd114d47 | [
"Apache-2.0"
] | 1,960 | 2017-10-19T10:31:24.000Z | 2020-11-07T18:19:23.000Z | rl_coach/environments/starcraft2_environment.py | JohnnyPeng18/coach | 1ee9e10747c535b387a00c946efa220efd114d47 | [
"Apache-2.0"
] | 349 | 2017-10-21T17:17:18.000Z | 2020-10-17T13:39:56.000Z | rl_coach/environments/starcraft2_environment.py | JohnnyPeng18/coach | 1ee9e10747c535b387a00c946efa220efd114d47 | [
"Apache-2.0"
] | 428 | 2017-10-21T01:32:58.000Z | 2020-11-07T13:49:49.000Z | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
from typing import Union, List
import numpy as np
from rl_coach.filters.observation.observation_move_axis_filter import ObservationMoveAxisFilter
try:
from pysc2 import maps
from pysc2.env import sc2_env
from pysc2.env import available_actions_printer
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.env import environment
from absl import app
from absl import flags
except ImportError:
from rl_coach.logger import failed_imports
failed_imports.append("PySc2")
from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.spaces import BoxActionSpace, VectorObservationSpace, PlanarMapsObservationSpace, StateSpace, CompoundActionSpace, \
DiscreteActionSpace
from rl_coach.filters.filter import InputFilter, OutputFilter
from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter
from rl_coach.filters.action.linear_box_to_box_map import LinearBoxToBoxMap
from rl_coach.filters.observation.observation_to_uint8_filter import ObservationToUInt8Filter
FLAGS = flags.FLAGS
FLAGS(['coach.py'])
SCREEN_SIZE = 84 # will also impact the action space size
# Starcraft Constants
_NOOP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
class StarcraftObservationType(Enum):
Features = 0
RGB = 1
StarcraftInputFilter = InputFilter(is_a_reference_filter=True)
StarcraftInputFilter.add_observation_filter('screen', 'move_axis', ObservationMoveAxisFilter(0, -1))
StarcraftInputFilter.add_observation_filter('screen', 'rescaling',
ObservationRescaleToSizeFilter(
PlanarMapsObservationSpace(np.array([84, 84, 1]),
low=0, high=255, channels_axis=-1)))
StarcraftInputFilter.add_observation_filter('screen', 'to_uint8', ObservationToUInt8Filter(0, 255))
StarcraftInputFilter.add_observation_filter('minimap', 'move_axis', ObservationMoveAxisFilter(0, -1))
StarcraftInputFilter.add_observation_filter('minimap', 'rescaling',
ObservationRescaleToSizeFilter(
PlanarMapsObservationSpace(np.array([64, 64, 1]),
low=0, high=255, channels_axis=-1)))
StarcraftInputFilter.add_observation_filter('minimap', 'to_uint8', ObservationToUInt8Filter(0, 255))
StarcraftNormalizingOutputFilter = OutputFilter(is_a_reference_filter=True)
StarcraftNormalizingOutputFilter.add_action_filter(
'normalization', LinearBoxToBoxMap(input_space_low=-SCREEN_SIZE / 2, input_space_high=SCREEN_SIZE / 2 - 1))
class StarCraft2EnvironmentParameters(EnvironmentParameters):
def __init__(self, level=None):
super().__init__(level=level)
self.screen_size = 84
self.minimap_size = 64
self.feature_minimap_maps_to_use = range(7)
self.feature_screen_maps_to_use = range(17)
self.observation_type = StarcraftObservationType.Features
self.disable_fog = False
self.auto_select_all_army = True
self.default_input_filter = StarcraftInputFilter
self.default_output_filter = StarcraftNormalizingOutputFilter
self.use_full_action_space = False
@property
def path(self):
return 'rl_coach.environments.starcraft2_environment:StarCraft2Environment'
# Environment
class StarCraft2Environment(Environment):
def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters,
target_success_rate: float=1.0, seed: Union[None, int]=None, human_control: bool=False,
custom_reward_threshold: Union[int, float]=None,
screen_size: int=84, minimap_size: int=64,
feature_minimap_maps_to_use: List=range(7), feature_screen_maps_to_use: List=range(17),
observation_type: StarcraftObservationType=StarcraftObservationType.Features,
disable_fog: bool=False, auto_select_all_army: bool=True,
use_full_action_space: bool=False, **kwargs):
super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters,
target_success_rate)
self.screen_size = screen_size
self.minimap_size = minimap_size
self.feature_minimap_maps_to_use = feature_minimap_maps_to_use
self.feature_screen_maps_to_use = feature_screen_maps_to_use
self.observation_type = observation_type
self.features_screen_size = None
self.feature_minimap_size = None
self.rgb_screen_size = None
self.rgb_minimap_size = None
if self.observation_type == StarcraftObservationType.Features:
self.features_screen_size = screen_size
self.feature_minimap_size = minimap_size
elif self.observation_type == StarcraftObservationType.RGB:
self.rgb_screen_size = screen_size
self.rgb_minimap_size = minimap_size
self.disable_fog = disable_fog
self.auto_select_all_army = auto_select_all_army
self.use_full_action_space = use_full_action_space
# step_mul is the equivalent to frame skipping. Not sure if it repeats actions in between or not though.
self.env = sc2_env.SC2Env(map_name=self.env_id, step_mul=frame_skip,
visualize=self.is_rendered,
agent_interface_format=sc2_env.AgentInterfaceFormat(
feature_dimensions=sc2_env.Dimensions(
screen=self.features_screen_size,
minimap=self.feature_minimap_size
)
# rgb_dimensions=sc2_env.Dimensions(
# screen=self.rgb_screen_size,
# minimap=self.rgb_screen_size
# )
),
# feature_screen_size=self.features_screen_size,
# feature_minimap_size=self.feature_minimap_size,
# rgb_screen_size=self.rgb_screen_size,
# rgb_minimap_size=self.rgb_screen_size,
disable_fog=disable_fog,
random_seed=self.seed
)
# print all the available actions
# self.env = available_actions_printer.AvailableActionsPrinter(self.env)
self.reset_internal_state(True)
"""
feature_screen: [height_map, visibility_map, creep, power, player_id, player_relative, unit_type, selected,
unit_hit_points, unit_hit_points_ratio, unit_energy, unit_energy_ratio, unit_shields,
unit_shields_ratio, unit_density, unit_density_aa, effects]
        feature_minimap: [height_map, visibility_map, creep, camera, player_id, player_relative, selected]
        player: [player_id, minerals, vespene, food_cap, food_army, food_workers, idle_worker_count,
                 army_count, warp_gate_count, larva_count]
"""
self.screen_shape = np.array(self.env.observation_spec()[0]['feature_screen'])
self.screen_shape[0] = len(self.feature_screen_maps_to_use)
self.minimap_shape = np.array(self.env.observation_spec()[0]['feature_minimap'])
self.minimap_shape[0] = len(self.feature_minimap_maps_to_use)
self.state_space = StateSpace({
"screen": PlanarMapsObservationSpace(shape=self.screen_shape, low=0, high=255, channels_axis=0),
"minimap": PlanarMapsObservationSpace(shape=self.minimap_shape, low=0, high=255, channels_axis=0),
"measurements": VectorObservationSpace(self.env.observation_spec()[0]["player"][0])
})
if self.use_full_action_space:
action_identifiers = list(self.env.action_spec()[0].functions)
num_action_identifiers = len(action_identifiers)
action_arguments = [(arg.name, arg.sizes) for arg in self.env.action_spec()[0].types]
sub_action_spaces = [DiscreteActionSpace(num_action_identifiers)]
for argument in action_arguments:
for dimension in argument[1]:
sub_action_spaces.append(DiscreteActionSpace(dimension))
self.action_space = CompoundActionSpace(sub_action_spaces)
else:
self.action_space = BoxActionSpace(2, 0, self.screen_size - 1, ["X-Axis, Y-Axis"],
default_action=np.array([self.screen_size/2, self.screen_size/2]))
self.target_success_rate = target_success_rate
def _update_state(self):
timestep = 0
self.screen = self.last_result[timestep].observation.feature_screen
# extract only the requested segmentation maps from the observation
self.screen = np.take(self.screen, self.feature_screen_maps_to_use, axis=0)
self.minimap = self.last_result[timestep].observation.feature_minimap
self.measurements = self.last_result[timestep].observation.player
self.reward = self.last_result[timestep].reward
self.done = self.last_result[timestep].step_type == environment.StepType.LAST
self.state = {
'screen': self.screen,
'minimap': self.minimap,
'measurements': self.measurements
}
def _take_action(self, action):
if self.use_full_action_space:
action_identifier = action[0]
action_arguments = action[1:]
action = actions.FunctionCall(action_identifier, action_arguments)
else:
coord = np.array(action[0:2])
noop = False
coord = coord.round()
coord = np.clip(coord, 0, SCREEN_SIZE - 1)
self.last_action_idx = coord
if noop:
action = actions.FunctionCall(_NOOP, [])
else:
action = actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord])
self.last_result = self.env.step(actions=[action])
def _restart_environment_episode(self, force_environment_reset=False):
# reset the environment
self.last_result = self.env.reset()
# select all the units on the screen
if self.auto_select_all_army:
self.env.step(actions=[actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])])
def get_rendered_image(self):
screen = np.squeeze(np.tile(np.expand_dims(self.screen, -1), (1, 1, 3)))
screen = screen / np.max(screen) * 255
return screen.astype('uint8')
def dump_video_of_last_episode(self):
from rl_coach.logger import experiment_path
self.env._run_config.replay_dir = experiment_path
self.env.save_replay('replays')
super().dump_video_of_last_episode()
def get_target_success_rate(self):
return self.target_success_rate
| 48.162055 | 130 | 0.665737 |
e0fafc0a2a53a7f59a2f6ffba546962afa43dc9b | 1,679 | py | Python | test/device/test_SM_adb_remote.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 57 | 2018-02-20T08:16:47.000Z | 2022-03-28T10:36:57.000Z | test/device/test_SM_adb_remote.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 377 | 2018-07-19T11:56:27.000Z | 2021-07-09T13:08:12.000Z | test/device/test_SM_adb_remote.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 24 | 2018-04-14T20:49:40.000Z | 2022-03-29T10:44:26.000Z | __author__ = 'Grzegorz Latuszek'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com'
import pytest
from moler.util.devices_SM import iterate_over_device_states, get_device
def test_adb_remote_device(device_connection, adb_remote_output):
adb_remote = get_device(name="ADB_REMOTE", connection=device_connection, device_output=adb_remote_output,
test_file_path=__file__)
iterate_over_device_states(device=adb_remote)
@pytest.fixture
def adb_remote_output():
output = {
"UNIX_LOCAL": {
'TERM=xterm-mono ssh -l remote_login -o ServerAliveInterval=7 -o ServerAliveCountMax=2 remote_host': 'remote#',
'su': 'local_root_prompt'
},
"UNIX_LOCAL_ROOT": {
'exit': 'moler_bash#'
},
"UNIX_REMOTE": {
'exit': 'moler_bash#',
'su': 'remote_root_prompt',
'adb -s f57e6b77 shell': 'shell@adbhost:/ $', # adb shell is changing prompt so it triggers following 2 send-responses
'': 'shell@adbhost:/ $', # to allow for self.connection.sendline("") in _send_prompt_set()
'export PS1="adb_shell@f57e6b77 \\$ "': 'adb_shell@f57e6b77 $' # to allow for self.connection.sendline(self.set_prompt) in _send_prompt_set()
},
"ADB_SHELL": {
'exit': 'remote#',
'su': 'adb_shell@f57e6b77 #',
},
"ADB_SHELL_ROOT": {
'exit': 'adb_shell@f57e6b77 $',
},
"UNIX_REMOTE_ROOT": {
'exit': 'remote#',
},
}
return output
| 35.723404 | 155 | 0.583681 |
6e9cec9e32382f645a7ca983891ff3724e55139e | 459 | py | Python | esp/migrations/0003_auto_20161101_2006.py | amillar2/light-django | bea10041bef2fc3d833634a2a0a44e6af8733a21 | [
"MIT"
] | null | null | null | esp/migrations/0003_auto_20161101_2006.py | amillar2/light-django | bea10041bef2fc3d833634a2a0a44e6af8733a21 | [
"MIT"
] | null | null | null | esp/migrations/0003_auto_20161101_2006.py | amillar2/light-django | bea10041bef2fc3d833634a2a0a44e6af8733a21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-02 01:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('esp', '0002_auto_20160519_2114'),
]
operations = [
migrations.AlterField(
model_name='pwm',
name='name',
field=models.CharField(default='light', max_length=20),
),
]
| 21.857143 | 67 | 0.6122 |
807f76e8b311efe150bc2015bd0a2e90a39379dc | 11,992 | py | Python | src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_account_scenarios.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 2 | 2021-03-24T21:06:20.000Z | 2021-03-24T21:07:58.000Z | src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_account_scenarios.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 3 | 2020-05-27T20:16:26.000Z | 2020-07-23T19:46:49.000Z | src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_account_scenarios.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 5 | 2020-05-09T17:47:09.000Z | 2020-10-01T19:52:06.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from azure.cli.testsdk import (ScenarioTest, LocalContextScenarioTest, JMESPathCheck, ResourceGroupPreparer,
StorageAccountPreparer, api_version_constraint, live_only, LiveScenarioTest)
from azure.cli.core.profiles import ResourceType
from ..storage_test_util import StorageScenarioMixin
from knack.util import CLIError
from datetime import datetime, timedelta
from azure_devtools.scenario_tests import AllowLargeResponse
class BlobServicePropertiesTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_change_feed')
@StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location="eastus2euap")
def test_storage_account_update_change_feed(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
from azure.cli.core.azclierror import InvalidArgumentValueError
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed false --change-feed-retention-days 14600 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --change-feed-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 146001 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 1)
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 100)
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 14600 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 14600)
result = self.cmd('{cmd} --enable-change-feed false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], False)
self.assertEqual(result['changeFeed']['retentionInDays'], None)
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_delete_retention_policy')
@StorageAccountPreparer(kind='StorageV2')
def test_storage_account_update_delete_retention_policy(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention false --delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
with self.assertRaises(SystemExit):
self.cmd('{cmd} --delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 366 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 1)
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 100)
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 365)
result = self.cmd('{cmd} --enable-delete-retention false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], False)
self.assertEqual(result['deleteRetentionPolicy']['days'], None)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix="cli_test_sa_versioning")
@StorageAccountPreparer(location="eastus2euap", kind="StorageV2")
def test_storage_account_update_versioning(self):
result = self.cmd('storage account blob-service-properties update --enable-versioning true -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
result = self.cmd('storage account blob-service-properties update --enable-versioning false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], False)
result = self.cmd('storage account blob-service-properties update --enable-versioning -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
result = self.cmd('storage account blob-service-properties show -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_delete_retention_policy')
@StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location='eastus2euap')
def test_storage_account_update_container_delete_retention_policy(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention false --container-delete-retention-days 365 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --container-delete-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 366 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 1)
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 100)
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 365)
result = self.cmd('{cmd} --enable-container-delete-retention false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], False)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], None)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix="cli_test_sa_versioning")
@StorageAccountPreparer(location="eastus2euap", kind="StorageV2")
def test_storage_account_update_last_access(self):
result = self.cmd('storage account blob-service-properties update --enable-last-access-tracking true -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['lastAccessTimeTrackingPolicy']['enable'], True)
result = self.cmd(
'storage account blob-service-properties show -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['lastAccessTimeTrackingPolicy']['enable'], True)
self.assertEqual(result['lastAccessTimeTrackingPolicy']['name'], "AccessTimeTracking")
self.assertEqual(result['lastAccessTimeTrackingPolicy']['trackingGranularityInDays'], 1)
self.assertEqual(result['lastAccessTimeTrackingPolicy']['blobType'][0], "blockBlob")
result = self.cmd('storage account blob-service-properties update --enable-last-access-tracking false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['lastAccessTimeTrackingPolicy'], None)
result = self.cmd('storage account blob-service-properties update --enable-last-access-tracking -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['lastAccessTimeTrackingPolicy']['enable'], True)
result = self.cmd('storage account blob-service-properties show -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['lastAccessTimeTrackingPolicy']['enable'], True)
@ResourceGroupPreparer()
@StorageAccountPreparer(kind="StorageV2")
def test_storage_account_default_service_properties(self):
from azure.cli.core.azclierror import InvalidArgumentValueError
self.cmd('storage account blob-service-properties show -n {sa} -g {rg}', checks=[
self.check('defaultServiceVersion', None)])
with self.assertRaisesRegexp(InvalidArgumentValueError, 'Valid example: 2008-10-27'):
self.cmd('storage account blob-service-properties update --default-service-version 2018 -n {sa} -g {rg}')
self.cmd('storage account blob-service-properties update --default-service-version 2018-11-09 -n {sa} -g {rg}',
checks=[self.check('defaultServiceVersion', '2018-11-09')])
self.cmd('storage account blob-service-properties show -n {sa} -g {rg}',
checks=[self.check('defaultServiceVersion', '2018-11-09')])
| 60.261307 | 150 | 0.684456 |
58bcea4952e2ac314b452d3e2c38ffc3573c3549 | 183 | py | Python | packages/postgres-database/src/simcore_postgres_database/settings.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | ["MIT"] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | packages/postgres-database/src/simcore_postgres_database/settings.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | ["MIT"] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | packages/postgres-database/src/simcore_postgres_database/settings.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | ["MIT"] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z |
from . import metadata
from .utils import build_url
# Schemas metadata (pre-loaded in __init__.py)
target_metadatas = [
metadata,
]
__all__ = ["target_metadatas", "build_url"]
| 16.636364 | 46 | 0.73224 |
378be6550a789a55f3a3be9fcb3eb3f8dc279e1e | 5,035 | py | Python | api/main.py | ziegeer/autocert | 285df181508573918e280948e51cdd7c65743281 | ["MIT"] | null | null | null | api/main.py | ziegeer/autocert | 285df181508573918e280948e51cdd7c65743281 | ["MIT"] | null | null | null | api/main.py | ziegeer/autocert | 285df181508573918e280948e51cdd7c65743281 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import imp
import pwd
import sys
from flask import Flask, request, jsonify, make_response
from pdb import set_trace as breakpoint
from pprint import pformat
from endpoint.factory import create_endpoint
from utils.version import get_version as get_api_version
from utils.format import fmt
from utils.exceptions import AutocertError
from app import app
STATUS_CODES = {
400: 'bad request',
401: 'unauthorized',
402: 'payment required',
403: 'forbidden',
404: 'not found',
405: 'method not allowed',
406: 'not acceptable',
407: 'proxy authentication required',
408: 'request timed-out',
409: 'conflict',
410: 'gone',
411: 'length required',
412: 'precondition failed',
413: 'payload too large',
414: 'uri too long',
415: 'unsupported media type',
416: 'range not satisfiable',
417: 'expectation failed',
418: 'im a teapot',
421: 'misdirected request',
422: 'unprocessable entity',
423: 'locked',
424: 'failed dependency',
426: 'upgrade required',
428: 'precondition required',
429: 'too many requires',
431: 'request header fields too large',
451: 'unavailable for legal reasons',
500: 'internal server error',
501: 'not implemented',
502: 'bad gateway',
503: 'service unavailable',
504: 'gateway timed out',
505: 'http version not supported',
506: 'variant also negotiates',
507: 'insufficient storage',
508: 'loop detected',
510: 'not extended',
511: 'network authentication required',
}
class EmptyJsonError(AutocertError):
def __init__(self, json):
message = fmt('empty json error ={0}', json)
super(EmptyJsonError, self).__init__(message)
@app.before_first_request
def initialize():
from logging.config import dictConfig
from config import CFG
if sys.argv[0] != 'venv/bin/pytest':
dictConfig(CFG.logging) #3
PID = os.getpid()
PPID = os.getppid()
USER = pwd.getpwuid(os.getuid())[0]
app.logger.info(fmt('starting api with pid={PID}, ppid={PPID} by user={USER}'))
def log_request(user, hostname, ip, method, path, json):
app.logger.info(fmt('{user}@{hostname} from {ip} ran {method} {path} with json=\n"{json}"'))
@app.route('/autocert/version', methods=['GET'])
def version():
args = request.json
args = args if args else {}
cfg = args.get('cfg', None)
log_request(
args.get('user', 'unknown'),
args.get('hostname', 'unknown'),
request.remote_addr,
request.method,
request.path,
args)
version = get_api_version()
return jsonify({'version': version})
@app.route('/autocert/config', methods=['GET'])
def config():
args = request.json
args = args if args else {}
cfg = args.get('cfg', None)
log_request(
args.get('user', 'unknown'),
args.get('hostname', 'unknown'),
request.remote_addr,
request.method,
request.path,
args)
from config import _load_config
cfg = _load_config(fixup=False)
return jsonify({'config': cfg})
@app.route('/autocert', methods=['GET', 'PUT', 'POST', 'DELETE'])
def route():
args = request.json
args = args if args else {}
cfg = args.get('cfg', None)
log_request(
args.get('user', 'unknown'),
args.get('hostname', 'unknown'),
request.remote_addr,
request.method,
request.path,
args)
try:
endpoint = create_endpoint(request.method, cfg, args)
json, status = endpoint.execute()
except AutocertError as ae:
status = 500
json = dict(errors={ae.name: ae.message})
return make_response(jsonify(json), status)
if not json:
raise EmptyJsonError(json)
return make_response(jsonify(json), status)
@app.errorhandler(AutocertError)
def unhandled_error(ae):
import traceback
tb = traceback.format_exc()
app.logger.error(tb)
status = 500
json = dict(errors={ae.name: ae.message})
    return make_response(jsonify(json), status)
@app.errorhandler(Exception)
def unhandled_exception(ex):
app.logger.error('unhandled exception', exc_info=True)
@app.errorhandler(400)
def bad_request(error):
return log_and_jsonify_error(400, error, request)
@app.errorhandler(401)
def unauthorized(error):
return log_and_jsonify_error(401, error, request)
@app.errorhandler(403)
def forbidden(error):
return log_and_jsonify_error(403, error, request)
@app.errorhandler(404)
def page_not_found(error):
return log_and_jsonify_error(404, error, request)
@app.errorhandler(405)
def method_not_allowed(error):
return log_and_jsonify_error(405, error, request)
@app.errorhandler(500)
def internal_server_error(error):
    return log_and_jsonify_error(500, error, request)
@app.errorhandler(503)
def service_unavailable(error):
    return log_and_jsonify_error(503, error, request)
if __name__ == '__main__':
app.run()
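# Added sketch (not part of the original service): exercising the /autocert/version
# route in-process with Flask's built-in test client, without starting a server.
# The helper name is illustrative only.
def _example_version_request():
    with app.test_client() as client:
        response = client.get('/autocert/version')
        return response.get_json()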
| 27.972222 | 96 | 0.660775 |
f23f28594607e2b2c7a5d03c92c370ee065d2416 | 7,516 | py | Python | tests/test_resources.py | FNNDSC/dacom | 78083a31d247a7907ff4fd935f64263222ae96a5 | ["MIT"] | null | null | null | tests/test_resources.py | FNNDSC/dacom | 78083a31d247a7907ff4fd935f64263222ae96a5 | ["MIT"] | null | null | null | tests/test_resources.py | FNNDSC/dacom | 78083a31d247a7907ff4fd935f64263222ae96a5 | ["MIT"] | null | null | null |
import logging
from pathlib import Path
import shutil
import os
import io
import time
import zipfile
from unittest import TestCase
from unittest import mock, skip
from flask import url_for
from pfcon.app import create_app
from pfcon.services import PmanService, ServiceException
class ResourceTests(TestCase):
"""
Base class for all the resource tests.
"""
def setUp(self):
# avoid cluttered console output (for instance logging all the http requests)
logging.disable(logging.WARNING)
self.app = create_app()
self.client = self.app.test_client()
with self.app.test_request_context():
# create a header with authorization token
pfcon_user = self.app.config.get('PFCON_USER')
pfcon_password = self.app.config.get('PFCON_PASSWORD')
url = url_for('api.auth')
response = self.client.post(url, data={'pfcon_user': pfcon_user,
'pfcon_password': pfcon_password})
self.headers = {'Authorization': 'Bearer ' + response.json['token']}
def tearDown(self):
# re-enable logging
logging.disable(logging.NOTSET)
class TestJobList(ResourceTests):
"""
Test the JobList resource.
"""
def setUp(self):
super().setUp()
self.job_dir = ''
with self.app.test_request_context():
self.url = url_for('api.joblist')
def tearDown(self):
if os.path.isdir(self.job_dir):
shutil.rmtree(self.job_dir)
super().tearDown()
def test_get(self):
response = self.client.get(self.url, headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertTrue('server_version' in response.json)
def test_post(self):
job_id = 'chris-jid-1'
self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id)
# create zip data file
memory_zip_file = io.BytesIO()
with zipfile.ZipFile(memory_zip_file, 'w', zipfile.ZIP_DEFLATED) as job_data_zip:
job_data_zip.writestr('data.txt', 'test data')
memory_zip_file.seek(0)
data = {
'jid': job_id,
'cmd_args': '--saveinputmeta --saveoutputmeta --dir /share/incoming',
'auid': 'cube',
'number_of_workers': '1',
'cpu_limit': '1000',
'memory_limit': '200',
'gpu_limit': '0',
'image': 'fnndsc/pl-simplefsapp',
'selfexec': 'simplefsapp',
'selfpath': '/usr/local/bin',
'execshell': 'python3',
'type': 'fs',
'data_file': (memory_zip_file, 'data.txt.zip')
}
# make the POST request
response = self.client.post(self.url, data=data, headers=self.headers,
content_type='multipart/form-data')
self.assertEqual(response.status_code, 201)
self.assertIn('compute', response.json)
self.assertIn('data', response.json)
self.assertEqual(response.json['data']['nfiles'], 1)
with self.app.test_request_context():
pman = PmanService.get_service_obj()
for _ in range(10):
time.sleep(3)
d_compute_response = pman.get_job(job_id)
if d_compute_response['status'] == 'finishedSuccessfully': break
self.assertEqual(d_compute_response['status'], 'finishedSuccessfully')
# cleanup swarm job
pman.delete_job(job_id)
class TestJob(ResourceTests):
"""
Test the Job resource.
"""
def setUp(self):
super().setUp()
self.job_dir = ''
self.compute_data = {
'cmd_args': '--saveinputmeta --saveoutputmeta --dir cube',
'cmd_path_flags': '--dir,',
'auid': 'cube',
'number_of_workers': '1',
'cpu_limit': '1000',
'memory_limit': '200',
'gpu_limit': '0',
'image': 'fnndsc/pl-simplefsapp',
'selfexec': 'simplefsapp',
'selfpath': '/usr/local/bin',
'execshell': 'python3',
'type': 'fs'
}
def tearDown(self):
if os.path.isdir(self.job_dir):
shutil.rmtree(self.job_dir)
super().tearDown()
def test_get(self):
job_id = 'chris-jid-2'
self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id)
incoming = os.path.join(self.job_dir, 'incoming')
Path(incoming).mkdir(parents=True, exist_ok=True)
outgoing = os.path.join(self.job_dir, 'outgoing')
Path(outgoing).mkdir(parents=True, exist_ok=True)
with open(os.path.join(incoming, 'test.txt'), 'w') as f:
f.write('job input test file')
with self.app.test_request_context():
# create job
url = url_for('api.job', job_id=job_id)
pman = PmanService.get_service_obj()
pman.run_job(job_id, self.compute_data)
# make the GET requests
for _ in range(10):
time.sleep(3)
response = self.client.get(url, headers=self.headers)
if response.json['compute']['status'] == 'finishedSuccessfully': break
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json['compute']['status'], 'finishedSuccessfully')
# cleanup swarm job
pman.delete_job(job_id)
def test_delete(self):
job_id = 'chris-jid-3'
self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id)
incoming = os.path.join(self.job_dir, 'incoming')
Path(incoming).mkdir(parents=True, exist_ok=True)
outgoing = os.path.join(self.job_dir, 'outgoing')
Path(outgoing).mkdir(parents=True, exist_ok=True)
with open(os.path.join(incoming, 'test.txt'), 'w') as f:
f.write('job input test file')
with self.app.test_request_context():
# create job
url = url_for('api.job', job_id=job_id)
pman = PmanService.get_service_obj()
pman.run_job(job_id, self.compute_data)
# make the DELETE request
time.sleep(3)
response = self.client.delete(url, headers=self.headers)
self.assertEqual(response.status_code, 204)
class TestJobFile(ResourceTests):
"""
Test the JobFile resource.
"""
def setUp(self):
super().setUp()
self.job_dir = ''
def tearDown(self):
if os.path.isdir(self.job_dir):
shutil.rmtree(self.job_dir)
super().tearDown()
def test_get(self):
job_id = 'chris-jid-4'
self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id)
with self.app.test_request_context():
url = url_for('api.jobfile', job_id=job_id)
outgoing = os.path.join(self.job_dir, 'outgoing')
Path(outgoing).mkdir(parents=True, exist_ok=True)
with open(os.path.join(outgoing, 'test.txt'), 'w') as f:
f.write('job input test file')
response = self.client.get(url, headers=self.headers)
self.assertEqual(response.status_code, 200)
memory_zip_file = io.BytesIO(response.data)
with zipfile.ZipFile(memory_zip_file, 'r', zipfile.ZIP_DEFLATED) as job_zip:
filenames = job_zip.namelist()
self.assertEqual(len(filenames), 1)
self.assertEqual(filenames[0], 'test.txt')
| 35.121495 | 89 | 0.588212 |
d22bfb1315602b6a4b82a0954c358bf1c3ea4f30 | 1,095 | py | Python | tools/bazel/setup.py | Falital/codechecker | b4a7958783a923998dd9d386e89af6411c8d593c | ["Apache-2.0"] | null | null | null | tools/bazel/setup.py | Falital/codechecker | b4a7958783a923998dd9d386e89af6411c8d593c | ["Apache-2.0"] | null | null | null | tools/bazel/setup.py | Falital/codechecker | b4a7958783a923998dd9d386e89af6411c8d593c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
import setuptools
with open("README.md", "r", encoding="utf-8", errors="ignore") as fh:
long_description = fh.read()
setuptools.setup(
name="bazel-compile-commands",
version="0.1.0",
author='CodeChecker Team (Ericsson)',
description="Generate compilation database (compile_commands.json)"
" from bazel build.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Ericsson/CodeChecker",
keywords=['bazel', 'compile_commands.json', 'compilation database'],
license='LICENSE.txt',
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"Operating System :: MacOS",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
],
python_requires='>=3.6',
entry_points={
'console_scripts': [
'bazel-compile-commands = bazel_compile_commands.bazel_compile_commands:main'
]
},
)
| 31.285714 | 89 | 0.652968 |
1f74017c2c6ff386c963483953b8daf40c19ce19 | 395 | py | Python | fundamentals/14-advance-python-modules/5-datetime.py | davidokun/Python | 0172e4c6669dc0bdb1beab762948f0ade248bde0 | ["MIT"] | null | null | null | fundamentals/14-advance-python-modules/5-datetime.py | davidokun/Python | 0172e4c6669dc0bdb1beab762948f0ade248bde0 | ["MIT"] | null | null | null | fundamentals/14-advance-python-modules/5-datetime.py | davidokun/Python | 0172e4c6669dc0bdb1beab762948f0ade248bde0 | ["MIT"] | null | null | null |
import datetime
# Create a time
t = datetime.time(9, 16, 10)
print(t)
# Get time
print(t.hour)
print(t.minute)
print(t.second)
# Dates
today = datetime.date.today()
print(today)
print(today.timetuple())
# Replace date
d1 = datetime.date(2016, 3, 11)
print(d1)
d2 = d1.replace(year=2018)
print(d2)
# Operations
d1 = datetime.date(1978, 6, 29)
d2 = datetime.date(2018, 6, 29)
print(d2 - d1)
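# Added illustration (not in the original lesson file): combining a date with a
# time via datetime.datetime, and doing arithmetic with datetime.timedelta.
dt1 = datetime.datetime(2018, 6, 29, 9, 30, 0)
dt2 = dt1 + datetime.timedelta(days=7, hours=3)
print(dt1)
print(dt2)
print(dt2 - dt1)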
| 14.107143 | 31 | 0.688608 |
929df4828a21413c61e8977d01b6c4f1ee7df282 | 9,567 | py | Python | model/sketch.nyu/models/network_seg_2d.py | denyingmxd/Torchssc | 9080c8aadd15d6ae6f7698ba64c6c2bae7393a24 | ["MIT"] | null | null | null | model/sketch.nyu/models/network_seg_2d.py | denyingmxd/Torchssc | 9080c8aadd15d6ae6f7698ba64c6c2bae7393a24 | ["MIT"] | null | null | null | model/sketch.nyu/models/network_seg_2d.py | denyingmxd/Torchssc | 9080c8aadd15d6ae6f7698ba64c6c2bae7393a24 | ["MIT"] | null | null | null |
# encoding: utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleRB(nn.Module):
def __init__(self, in_channel, norm_layer, bn_momentum):
super(SimpleRB, self).__init__()
self.path = nn.Sequential(
nn.Conv3d(in_channel, in_channel, kernel_size=3, padding=1, bias=False),
norm_layer(in_channel, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(in_channel, in_channel, kernel_size=3, padding=1, bias=False),
norm_layer(in_channel, momentum=bn_momentum),
)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
conv_path = self.path(x)
out = residual + conv_path
out = self.relu(out)
return out
class Bottleneck3D(nn.Module):
def __init__(self, inplanes, planes, norm_layer, stride=1, dilation=[1, 1, 1], expansion=4, downsample=None,
fist_dilation=1, multi_grid=1,
bn_momentum=0.0003):
super(Bottleneck3D, self).__init__()
self.expansion = expansion
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(planes, momentum=bn_momentum)
self.conv2 = nn.Conv3d(planes, planes, kernel_size=(1, 1, 3), stride=(1, 1, stride),
dilation=(1, 1, dilation[0]), padding=(0, 0, dilation[0]), bias=False)
self.bn2 = norm_layer(planes, momentum=bn_momentum)
self.conv3 = nn.Conv3d(planes, planes, kernel_size=(1, 3, 1), stride=(1, stride, 1),
dilation=(1, dilation[1], 1), padding=(0, dilation[1], 0), bias=False)
self.bn3 = norm_layer(planes, momentum=bn_momentum)
self.conv4 = nn.Conv3d(planes, planes, kernel_size=(3, 1, 1), stride=(stride, 1, 1),
dilation=(dilation[2], 1, 1), padding=(dilation[2], 0, 0), bias=False)
self.bn4 = norm_layer(planes, momentum=bn_momentum)
self.conv5 = nn.Conv3d(planes, planes * self.expansion, kernel_size=(1, 1, 1), bias=False)
self.bn5 = norm_layer(planes * self.expansion, momentum=bn_momentum)
self.relu = nn.ReLU(inplace=False)
self.relu_inplace = nn.ReLU(inplace=True)
self.downsample = downsample
self.dilation = dilation
self.stride = stride
self.downsample2 = nn.Sequential(
nn.AvgPool3d(kernel_size=(1, stride, 1), stride=(1, stride, 1)),
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
norm_layer(planes, momentum=bn_momentum),
)
self.downsample3 = nn.Sequential(
nn.AvgPool3d(kernel_size=(stride, 1, 1), stride=(stride, 1, 1)),
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
norm_layer(planes, momentum=bn_momentum),
)
self.downsample4 = nn.Sequential(
nn.AvgPool3d(kernel_size=(stride, 1, 1), stride=(stride, 1, 1)),
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
norm_layer(planes, momentum=bn_momentum),
)
def forward(self, x):
residual = x
out1 = self.relu(self.bn1(self.conv1(x)))
out2 = self.bn2(self.conv2(out1))
out2_relu = self.relu(out2)
out3 = self.bn3(self.conv3(out2_relu))
if self.stride != 1:
out2 = self.downsample2(out2)
out3 = out3 + out2
out3_relu = self.relu(out3)
out4 = self.bn4(self.conv4(out3_relu))
if self.stride != 1:
out2 = self.downsample3(out2)
out3 = self.downsample4(out3)
out4 = out4 + out2 + out3
out4_relu = self.relu(out4)
out5 = self.bn5(self.conv5(out4_relu))
if self.downsample is not None:
residual = self.downsample(x)
out = out5 + residual
out_relu = self.relu(out)
return out_relu
class STAGE2(nn.Module):
def __init__(self, class_num, norm_layer, resnet_out=2048, feature=512, ThreeDinit=True,
bn_momentum=0.1, pretrained_model=None, eval=False, freeze_bn=False):
super(STAGE2, self).__init__()
self.business_layer = []
self.resnet_out = resnet_out
self.feature = feature
self.ThreeDinit = ThreeDinit
self.pooling = nn.AvgPool3d(kernel_size=3, padding=1, stride=1)
self.business_layer.append(self.pooling)
self.semantic_layer1 = nn.Sequential(
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, expansion=4, stride=2, downsample=
nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature,
kernel_size=1, stride=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
), norm_layer=norm_layer),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.business_layer.append(self.semantic_layer1)
self.semantic_layer2 = nn.Sequential(
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, expansion=8, stride=2, downsample=
nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature * 2,
kernel_size=1, stride=1, bias=False),
norm_layer(feature * 2, momentum=bn_momentum),
), norm_layer=norm_layer),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.business_layer.append(self.semantic_layer2)
self.classify_semantic = nn.ModuleList([
nn.Sequential(
nn.ConvTranspose3d(feature * 2, feature, kernel_size=3, stride=2, padding=1, dilation=1,
output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.ConvTranspose3d(feature, feature, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.Dropout3d(.1),
nn.Conv3d(feature, class_num, kernel_size=1, bias=True)
),
nn.Sequential(
nn.Dropout3d(.1),
nn.Conv3d(feature, 2, kernel_size=1, bias=True)
)]
)
self.business_layer.append(self.classify_semantic)
self.oper_raw = nn.Sequential(
nn.Conv3d(12, 3, kernel_size=3, padding=1, bias=False),
norm_layer(3, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(3, 64, kernel_size=3, padding=1, bias=False),
norm_layer(64, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(64, feature, kernel_size=3, padding=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
)
self.business_layer.append(self.oper_raw)
def forward(self, seg_2d):
edge_rgb= self.oper_raw(seg_2d)
seg_fea = edge_rgb
semantic1 = self.semantic_layer1(seg_fea) + F.interpolate(seg_fea, size=[30, 18, 30])
semantic2 = self.semantic_layer2(semantic1)
up_sem1 = self.classify_semantic[0](semantic2)
up_sem1 = up_sem1 + semantic1
up_sem2 = self.classify_semantic[1](up_sem1)
up_sem2 = up_sem2 + F.interpolate(up_sem1, size=[60, 36, 60], mode="trilinear", align_corners=True)
pred_semantic = self.classify_semantic[2](up_sem2)
results = {'pred_semantic': pred_semantic}
return results
class Network_seg_2d(nn.Module):
def __init__(self, class_num, norm_layer, resnet_out=2048, feature=512, ThreeDinit=True,
bn_momentum=0.1, pretrained_model=None, eval=False, freeze_bn=False):
super(Network_seg_2d, self).__init__()
self.business_layer = []
self.dilate = 2
self.stage2 = STAGE2(class_num, norm_layer, resnet_out=resnet_out, feature=feature, ThreeDinit=ThreeDinit,
bn_momentum=bn_momentum, pretrained_model=pretrained_model, eval=eval, freeze_bn=freeze_bn)
self.business_layer += self.stage2.business_layer
def forward(self,img, depth_mapping_3d, tsdf, sketch_gt,seg_2d):
results= self.stage2(seg_2d)
return results
def _nostride_dilate(self, m, dilate):
if isinstance(m, nn.Conv2d):
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
if __name__ == '__main__':
pass
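# Added sketch (not part of the original model file): a minimal shape check for
# the Bottleneck3D block defined above. The channel sizes are illustrative and
# chosen so that inplanes == planes * expansion, which lets the residual addition
# work without a downsample branch; it assumes PyTorch with BatchNorm3d as the
# norm layer.
def _bottleneck3d_shape_check():
    block = Bottleneck3D(inplanes=64, planes=16, norm_layer=nn.BatchNorm3d).eval()
    dummy = torch.randn(1, 64, 8, 8, 8)  # (batch, channels, depth, height, width)
    with torch.no_grad():
        out = block(dummy)
    # With stride=1 and the default expansion of 4, the output keeps the input shape.
    assert out.shape == dummy.shape
    return out.shape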
| 43.094595 | 120 | 0.598829 |
e364ba1b872799f2f1f243b623c62023c5db34fe | 518 | py | Python | v2/ansible/roles/bngsudheer.opendkim/molecule/default/tests/test_default.py | jonfairbanks/rtsp-nvr | c770c77e74a062c63fb5e2419bc00a17543da332 | ["MIT"] | 558 | 2017-10-04T14:33:18.000Z | 2022-03-24T21:25:08.000Z | v2/ansible/roles/bngsudheer.opendkim/molecule/default/tests/test_default.py | jonfairbanks/rtsp-nvr | c770c77e74a062c63fb5e2419bc00a17543da332 | ["MIT"] | 22 | 2018-04-29T04:25:49.000Z | 2021-08-02T17:26:02.000Z | v2/ansible/roles/bngsudheer.opendkim/molecule/default/tests/test_default.py | jonfairbanks/rtsp-nvr | c770c77e74a062c63fb5e2419bc00a17543da332 | ["MIT"] | 127 | 2017-11-14T19:47:27.000Z | 2022-03-24T21:25:12.000Z |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_opendkim_is_installed(host):
opendkim = host.package("opendkim")
assert opendkim.is_installed
def test_default_key_exists(host):
    assert host.file("/etc/opendkim/keys/default.txt").exists
| 21.583333 | 63 | 0.733591 |
3653c5e79afcf424535d23a1ab30337be8757613 | 28 | py | Python | plugins/pelican-cite/__init__.py | mohnjahoney/website_source | edc86a869b90ae604f32e736d9d5ecd918088e6a | ["MIT"] | 13 | 2020-01-27T09:02:25.000Z | 2022-01-20T07:45:26.000Z | plugins/pelican-cite/__init__.py | mohnjahoney/website_source | edc86a869b90ae604f32e736d9d5ecd918088e6a | ["MIT"] | 29 | 2020-03-22T06:57:57.000Z | 2022-01-24T22:46:42.000Z | plugins/pelican-cite/__init__.py | mohnjahoney/website_source | edc86a869b90ae604f32e736d9d5ecd918088e6a | ["MIT"] | 6 | 2020-07-10T00:13:30.000Z | 2022-01-26T08:22:33.000Z |
from .pelican_cite import *
| 14 | 27 | 0.785714 |
3f0d5c4f1a5d58931463792fa80851254994760d | 2,148 | py | Python | python/examples/postgresql.py | spk/language-examples | 0c283e9ad2a5a70631458acc3275074176c1621b | ["MIT"] | 4 | 2020-02-22T14:39:21.000Z | 2021-03-01T20:17:38.000Z | python/examples/postgresql.py | spk/language-examples | 0c283e9ad2a5a70631458acc3275074176c1621b | ["MIT"] | 21 | 2019-02-21T22:37:41.000Z | 2021-09-20T16:19:40.000Z | python/examples/postgresql.py | spk/language-examples | 0c283e9ad2a5a70631458acc3275074176c1621b | ["MIT"] | 6 | 2019-09-14T16:03:50.000Z | 2021-03-01T19:39:09.000Z |
import psycopg2
from platformshconfig import Config
def usage_example():
# Create a new Config object to ease reading the Platform.sh environment variables.
# You can alternatively use os.environ yourself.
config = Config()
# The 'database' relationship is generally the name of primary SQL database of an application.
# That's not required, but much of our default automation code assumes it.' \
database = config.credentials('postgresql')
try:
# Connect to the database.
conn_params = {
'host': database['host'],
'port': database['port'],
'dbname': database['path'],
'user': database['username'],
'password': database['password']
}
conn = psycopg2.connect(**conn_params)
# Open a cursor to perform database operations.
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS People")
# Creating a table.
sql = '''
CREATE TABLE IF NOT EXISTS People (
id SERIAL PRIMARY KEY,
name VARCHAR(30) NOT NULL,
city VARCHAR(30) NOT NULL
)
'''
cur.execute(sql)
# Insert data.
sql = '''
INSERT INTO People (name, city) VALUES
('Neil Armstrong', 'Moon'),
('Buzz Aldrin', 'Glen Ridge'),
('Sally Ride', 'La Jolla');
'''
cur.execute(sql)
# Show table.
sql = '''SELECT * FROM People'''
cur.execute(sql)
result = cur.fetchall()
table = '''<table>
<thead>
<tr><th>Name</th><th>City</th></tr>
</thead>
<tbody>'''
if result:
for record in result:
table += '''<tr><td>{0}</td><td>{1}</td><tr>\n'''.format(record[1], record[2])
table += '''</tbody>\n</table>\n'''
# Drop table
sql = "DROP TABLE People"
cur.execute(sql)
# Close communication with the database
cur.close()
conn.close()
return table
except Exception as e:
return e
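# Added sketch (not part of the original example): running the module directly
# just prints the generated HTML table; it assumes the Platform.sh relationship
# credentials are available in the environment.
if __name__ == "__main__":
    print(usage_example())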
| 26.85 | 98 | 0.524674 |
ec3ee4983825335e201c03a8d7472a539d9b9aa6 | 6,411 | py | Python | feature_engineering_functions.py | Martina385/Kickstarter | 992ddbfd3aa01eb45befba04e7ede634ec011630 | ["MIT"] | null | null | null | feature_engineering_functions.py | Martina385/Kickstarter | 992ddbfd3aa01eb45befba04e7ede634ec011630 | ["MIT"] | null | null | null | feature_engineering_functions.py | Martina385/Kickstarter | 992ddbfd3aa01eb45befba04e7ede634ec011630 | ["MIT"] | null | null | null |
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import time
import datetime as dt
import json
from pathlib import Path
import pickle
import os, glob
def string_len_w(string):
'''Return length of string (number of word, seperated by Space).'''
string_str = str(string)
string_list = string_str.split()
string_len = len(string_list)
return string_len
def add_blurb_len_w (df):
'''Adding column that contains the length of the Blurb (words) and returns the updated Dataframe'''
df['blurb_len_w'] = df.apply(lambda x: string_len_w(x['blurb']), axis=1)
return df
def string_len_slug_w(string):
'''Returns length of string (number of words, seperated by "-").'''
string_str = str(string)
string_list = string_str.split("-")
string_len = len(string_list)
return string_len
def add_slug_len_w (df):
'''Adding column that contains the length of the Slug (words) and returns the updated Dataframe'''
df['slug_len_w'] = df.apply(lambda x: string_len_slug_w(x['slug']), axis=1)
return df
def add_parent_id(df):
'''Extracts Parent ID out of the Category json and adds the Column to Dataframe. Returns updated Dataframe'''
df['category_parent_id'] = pd.DataFrame([json.loads(df["category"][i]).get("parent_id") for i in range(df.shape[0])])
return df
def add_category_id(df):
'''Extracts category ID out of the Category json and adds the Column to Dataframe. Returns updated Dataframe'''
df['category_id'] = pd.DataFrame([json.loads(df["category"][i]).get("id") for i in range(df.shape[0])])
return df
def add_category_name(df):
'''Extracts category name out of the Category json and adds the Column to Dataframe. Returns updated Dataframe'''
df['category_name'] = pd.DataFrame([json.loads(df["category"][i]).get("name") for i in range(df.shape[0])])
return df
def fill_na(df, column_name):
'''Fill Missings with 0 as type integer. Returns updated dataframe. eg, for parent ID and pledged per backer'''
df[column_name] = df[column_name].fillna(0).astype("int")
return df
# Making a list based on entry in one category and if missing adds entry of another Column
def helper_list(df):
'''Making a list based on entry in one category and if missing adds entry of another Column'''
empty = []
for i in range(df.shape[0]):
if df["category_parent_id"][i] != 0:
empty.append(df["category_parent_id"][i])
else:
empty.append(df["category_id"][i])
return empty
# adds helper list as column to dataframe
def add_list_as_column(df, column_name, list_name):
    '''Adds helper list as column to dataframe and returns updated dataframe'''
df[column_name] = pd.DataFrame(list_name)
return df
def add_parent_name(df, column_name1, column_name2, dictionary):
'''based on key value in a column, column with value is added as a column and updated dataframe is returned.
Example:
parents_dict = {1: "Art", 3: "Comics", 6: "Dance", 7: "Design", 9: "Fashion", 10: "Food",
11: "Film & Video", 12: "Games", 13: "Journalism", 14: "Music", 15: "Photography", 16: "Technology",
17: "Theater", 18: "Publishing", 26: "Crafts"}
df["parent_name"] = df["filled_parent"].apply(lambda x: parents_dict.get(x))'''
df[column_name1] = df[column_name2].apply(lambda x: dictionary.get(x))
return df
#funtion to extract the month out of the number
def extract_month(number):
'''Extracts the month out of the number and returns the month'''
gmtime = time.gmtime(number)
return gmtime[1]
# Adding column with month the project was launched
def adding_month_launched(df):
'''Adding column with month the project was launched and returns the updated dataframe'''
df["launched_month"] = df.apply(lambda x: extract_month(x["launched_at"]), axis=1)
return df
def duration(deadline, launched_at):
'''Calculating difference between two timepoints and returns it in days'''
duration = deadline - launched_at
duration_complete = dt.timedelta(seconds=duration)
return duration_complete.days
# Adding column with duration in days
def adding_duration(df):
'''Adding column with duration in days and returns updated dataframe'''
df["duration_days"] = df.apply(lambda x: duration(x["deadline"], x["launched_at"]), axis=1)
return df
def adding_preparation(df):
'''Adding column with preparation in days and returns updated dataframe'''
df["preparation"] = df.apply(lambda x: duration(x["launched_at"], x["created_at"]), axis=1)
return df
def adding_pledged_per_backer(df):
'''Adding column that is the averaged amount pledged per backer, returns updated dataframe'''
df['pledged_per_backer'] = (df['usd_pledged'] / df['backers_count']).round(2)
return df
def usd_convert_goal(df, column_name, exchange_rate):
'''Converts a Column based on given exchange rate, rounds it to two decimal spaces
and returns updated dataframe, e.g.
df['goal'] = (df['goal'] * df['static_usd_rate']).round(2)'''
df[column_name] = (df[column_name] * df[exchange_rate]).round(2)
return df
def drop_rows_missings(df, column_name):
    '''Drop rows with missing values in column, eg. Blurb. Returns dataframe.'''
df.dropna(subset = [column_name], inplace=True)
return df
def drop_duplicates(df, column_name):
'''Creating dataframe and dropping all duplicates, based on a column_name (eg, ID)
and keep the last ("newest") duplicate'''
    df = df.drop_duplicates(subset=[column_name], keep='last')
return df
# drop rows with values certain values in a dataframe and returns updated dataframe, eg 'suspended' and 'live' in column 'state'
def drop_rows_value (df, column_name, value):
'''drop rows with values certain values in a dataframe and returns updated dataframe'''
df = df.drop(df[df[column_name] == value ].index)
return df
def drop_columns(df, list_columns):
    '''Drops columns in the list and returns updated dataframe'''
df.drop(list_columns, axis=1, inplace=True)
return df
def convert_to_int(df, column_name):
'''Converting Column type to Integer and returns updated df'''
df[column_name] = df[column_name].astype("int")
    return df
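# Added usage sketch (not part of the original module): chaining a few of the
# helpers above on a toy DataFrame. The column values and epoch timestamps are
# made up for illustration and mirror the Kickstarter-style schema the
# functions expect.
if __name__ == "__main__":
    toy = pd.DataFrame({
        "blurb": ["A solar powered lamp", "Hand drawn comic book"],
        "slug": ["solar-powered-lamp", "hand-drawn-comic-book"],
        "created_at": [1546300800, 1546387200],   # epoch seconds
        "launched_at": [1547300800, 1547387200],
        "deadline": [1549300800, 1549387200],
    })
    toy = add_blurb_len_w(toy)
    toy = add_slug_len_w(toy)
    toy = adding_month_launched(toy)
    toy = adding_duration(toy)
    toy = adding_preparation(toy)
    print(toy[["blurb_len_w", "slug_len_w", "launched_month", "duration_days", "preparation"]])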
 | 41.901961 | 128 | 0.70301 |
348c5f7883ea14f5c64aeb8245049b678cbeda8b | 1,057 | py | Python | src/array/hour_glass_sum.py | vasanthonline/python-algos | 7bbdd3bf72894388f4026f4cf9f610d6a4158b8f | ["Apache-2.0"] | null | null | null | src/array/hour_glass_sum.py | vasanthonline/python-algos | 7bbdd3bf72894388f4026f4cf9f610d6a4158b8f | ["Apache-2.0"] | null | null | null | src/array/hour_glass_sum.py | vasanthonline/python-algos | 7bbdd3bf72894388f4026f4cf9f610d6a4158b8f | ["Apache-2.0"] | null | null | null |
#!/bin/python3
'''
To take a 6x6 array and find the sum of each hour glass from the array.
Return the maximum sum of the sums from the hour glasses.
:param arr: The input 2-dimensional 6x6 array
:type arr: 6x6 array
:return: The maximum sum of the sums from the hour glasses.
:rtype: int
'''
def hourGlassSum(arr):
sums = []
for i in range(0, len(arr)):
sub = arr[i]
for j in range(0, len(sub)):
if(j+2 < len(sub) and i+2 < len(arr)):
sums.append(sub[j] + sub[j+1] + sub[j+2] + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2])
return max(sums)
arr = []
arr = [[-9,-9,-9,1,1,1],
[0,-9,0,4,3,2],
[-9,-9,-9,1,2,3],
[0,0,8,6,6,0],
[0,0,0,-2,0,0],
[0,0,1,2,4,0]]
# arr = [[1, 2, 3, 4, 5, 6], [2, 3, 4, 5, 6, 7], [3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9], [5, 6, 7, 8, 9, 0], [6, 7, 8, 9, 0, 1]]
# for _ in range(6):
# arr.append(list(map(int, list(input("Please enter the {} row of 6 digits:".format(_ + 1)).rstrip()))))
print("Maximum sum of hour glass for the array is: {}".format(hourGlassSum(arr))) | 33.03125 | 128 | 0.555345 |
b1c2bbf2150af5659cacccccf1de976c11448048 | 4,219 | py | Python | libraries/unified-model/unified_model/cli_handler.py | felixridinger/machine-learning-lab | 410e2f5fecb7ea91dcec12a5b9cb9161331191bf | ["Apache-2.0", "MIT"] | 55 | 2020-08-28T12:26:15.000Z | 2022-02-01T08:57:26.000Z | libraries/unified-model/unified_model/cli_handler.py | felixridinger/machine-learning-lab | 410e2f5fecb7ea91dcec12a5b9cb9161331191bf | ["Apache-2.0", "MIT"] | 38 | 2020-09-01T17:17:22.000Z | 2022-03-31T15:44:57.000Z | libraries/unified-model/unified_model/cli_handler.py | felixridinger/machine-learning-lab | 410e2f5fecb7ea91dcec12a5b9cb9161331191bf | ["Apache-2.0", "MIT"] | 19 | 2020-08-31T16:38:09.000Z | 2022-03-09T13:59:58.000Z |
from __future__ import absolute_import, print_function
import logging
import sys
import click
from unified_model import compatibility_utils
from unified_model import model_handler
from unified_model.server import api_server
MODEL_PATH = click.option("--model-path", "-m", metavar="PATH", required=False, type=click.Path(exists=True),
help="Path to the unified model. If not provided, it assumes that it is started from within a model.")
log = logging.getLogger(__name__)
@click.group()
@click.version_option()
def cli():
# log to sys out
logging.basicConfig(stream=sys.stdout, format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def serve(model_path: str = None, port: int = None, host: str = None):
"""Serve a Unified Model via a REST API server."""
try:
model_handler.init(default_model_key=model_path)
except:
log.exception("Failed to initialize model handler.")
api_server.run(port=port, host=host)
@click.command("serve")
@MODEL_PATH
@click.option("--port", "-p", default=5000, required=False, type=click.IntRange(1, 65535),
help="Server port. [default: 5000]")
@click.option("--host", "-h", default="127.0.0.1", required=False, type=click.STRING,
help="Server host. [default: localhost]")
def _serve_cli(model_path, port, host):
serve(model_path, port, host)
def predict(model_path: str = None, input_data=None, input_path: str = None, output_path: str = None, **kwargs):
"""Make a prediction on the given data item."""
try:
# TODO add predict batch support?
# TODO real logging
model_handler.init(default_model_key=model_path)
log.info(model_handler.predict(input_data, **kwargs))
except:
log.exception("Failed predict with model.")
@click.command("predict",
context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True))
@MODEL_PATH
@click.option("--input-data", "-d", help="Provide the input data as an cli argument.",
required=False)
@click.option("--input-path", "-i", help="Input file containing the data to predict against.",
required=False, type=click.Path(exists=True))
@click.option("--output-path", "-o",
help="Results will be output to this file. If not provided, stdout will be used.",
type=click.Path(exists=False), required=False)
@click.pass_context
def _predict_cli(ctx, model_path, input_data, input_path, output_path):
"""Make a prediction on the given data item."""
# Allow additional arguments to be used in predict
kwargs = {}
for item in ctx.args:
item = str(item)
if item.startswith("--") and "=" in item:
arg_split = item.replace("--", "").split("=")
kwargs[arg_split[0].strip()] = arg_split[1].strip()
predict(model_path, input_data, input_path, output_path, **kwargs)
def convert(model_path: str, model_format: str, output_path: str):
"""Convert a Unified Model into another format."""
try:
model_handler.init(default_model_key=model_path)
if model_format == 'pex':
compatibility_utils.convert_to_pex(model_handler.get_model(), output_path)
elif model_format == "mlflow":
compatibility_utils.convert_to_mlflow(model_handler.get_model(), output_path)
elif model_format == 'pipelineai':
compatibility_utils.convert_to_pipelineai(model_handler.get_model(), output_path)
except:
log.exception("Failed to convert model.")
@click.command("convert")
@MODEL_PATH
@click.option("--model-format", "-f", required=True, type=click.Choice(['pex', 'mlflow', 'pipelineai']),
help="The format to convert the model to.")
@click.option("--output-path", "-o",
help="Output path were the converted model is saved to.",
type=click.Path(exists=False), required=True)
def _convert_cli(model_path, model_format, output_path):
convert(model_path, model_format, output_path)
cli.add_command(_convert_cli)
cli.add_command(_predict_cli)
cli.add_command(_serve_cli)
if __name__ == '__main__':
cli()
| 36.686957 | 128 | 0.67196 |
3ae435f6d3d5b666093236c9bb32ae8850a6c948 | 180 | py | Python | Python/math/test_plotting.py | AlexLemna/learns | 7282d93f4c6ed7692443ca826ca97a2709f23e41 | ["Unlicense"] | null | null | null | Python/math/test_plotting.py | AlexLemna/learns | 7282d93f4c6ed7692443ca826ca97a2709f23e41 | ["Unlicense"] | null | null | null | Python/math/test_plotting.py | AlexLemna/learns | 7282d93f4c6ed7692443ca826ca97a2709f23e41 | ["Unlicense"] | null | null | null |
import matplotlib.pyplot as pt
some_numbers = [1, 2, 3, 5.5, 8, 22, 3, 7, 7, 21]
other_numbers = [0, 3, 3, 14, 9, 9, 4.7, 3, 5, 7]
pt.plot(some_numbers, other_numbers)
pt.show()
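# Added sketch (not in the original scratch file): the same data drawn with
# markers, axis labels and a title so the figure is self-describing.
pt.figure()
pt.plot(some_numbers, other_numbers, marker="o", linestyle="")
pt.xlabel("some_numbers")
pt.ylabel("other_numbers")
pt.title("other_numbers vs. some_numbers")
pt.show()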
| 22.5 | 49 | 0.627778 |
7aa5d390ab4f9e8f73bb4ed1df44ba55c4330e64 | 4,755 | py | Python | wavefront_api_client/models/response_container_user_api_token.py | PowerOlive/python-client | eebda67381fcf893914c309103878236b609a70b | ["Apache-2.0"] | 11 | 2016-05-30T17:16:45.000Z | 2021-06-11T19:32:59.000Z | wavefront_api_client/models/response_container_user_api_token.py | PowerOlive/python-client | eebda67381fcf893914c309103878236b609a70b | ["Apache-2.0"] | 25 | 2016-05-02T23:05:19.000Z | 2020-11-18T22:43:20.000Z | wavefront_api_client/models/response_container_user_api_token.py | PowerOlive/python-client | eebda67381fcf893914c309103878236b609a70b | ["Apache-2.0"] | 30 | 2016-04-29T17:17:11.000Z | 2022-02-11T04:58:37.000Z |
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class ResponseContainerUserApiToken(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'response': 'UserApiToken',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None, _configuration=None): # noqa: E501
"""ResponseContainerUserApiToken - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
"""Gets the response of this ResponseContainerUserApiToken. # noqa: E501
:return: The response of this ResponseContainerUserApiToken. # noqa: E501
:rtype: UserApiToken
"""
return self._response
@response.setter
def response(self, response):
"""Sets the response of this ResponseContainerUserApiToken.
:param response: The response of this ResponseContainerUserApiToken. # noqa: E501
:type: UserApiToken
"""
self._response = response
@property
def status(self):
"""Gets the status of this ResponseContainerUserApiToken. # noqa: E501
:return: The status of this ResponseContainerUserApiToken. # noqa: E501
:rtype: ResponseStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResponseContainerUserApiToken.
:param status: The status of this ResponseContainerUserApiToken. # noqa: E501
:type: ResponseStatus
"""
if self._configuration.client_side_validation and status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerUserApiToken, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseContainerUserApiToken):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResponseContainerUserApiToken):
return True
return self.to_dict() != other.to_dict()
| 31.490066 | 409 | 0.610936 |
7d9e3048b8e13f4e0c06a13ccf9f675c7c0feebc | 12,332 | py | Python | sdk/python/pulumi_azure_nextgen/servicefabric/v20200101preview/get_node_type.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_nextgen/servicefabric/v20200101preview/get_node_type.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_nextgen/servicefabric/v20200101preview/get_node_type.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNodeTypeResult',
'AwaitableGetNodeTypeResult',
'get_node_type',
]
@pulumi.output_type
class GetNodeTypeResult:
"""
Describes a node type in the cluster, each node type represents sub set of nodes in the cluster.
"""
def __init__(__self__, application_ports=None, capacities=None, data_disk_size_gb=None, ephemeral_ports=None, is_primary=None, name=None, placement_properties=None, provisioning_state=None, tags=None, type=None, vm_extensions=None, vm_image_offer=None, vm_image_publisher=None, vm_image_sku=None, vm_image_version=None, vm_instance_count=None, vm_secrets=None, vm_size=None):
if application_ports and not isinstance(application_ports, dict):
raise TypeError("Expected argument 'application_ports' to be a dict")
pulumi.set(__self__, "application_ports", application_ports)
if capacities and not isinstance(capacities, dict):
raise TypeError("Expected argument 'capacities' to be a dict")
pulumi.set(__self__, "capacities", capacities)
if data_disk_size_gb and not isinstance(data_disk_size_gb, int):
raise TypeError("Expected argument 'data_disk_size_gb' to be a int")
pulumi.set(__self__, "data_disk_size_gb", data_disk_size_gb)
if ephemeral_ports and not isinstance(ephemeral_ports, dict):
raise TypeError("Expected argument 'ephemeral_ports' to be a dict")
pulumi.set(__self__, "ephemeral_ports", ephemeral_ports)
if is_primary and not isinstance(is_primary, bool):
raise TypeError("Expected argument 'is_primary' to be a bool")
pulumi.set(__self__, "is_primary", is_primary)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if placement_properties and not isinstance(placement_properties, dict):
raise TypeError("Expected argument 'placement_properties' to be a dict")
pulumi.set(__self__, "placement_properties", placement_properties)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if vm_extensions and not isinstance(vm_extensions, list):
raise TypeError("Expected argument 'vm_extensions' to be a list")
pulumi.set(__self__, "vm_extensions", vm_extensions)
if vm_image_offer and not isinstance(vm_image_offer, str):
raise TypeError("Expected argument 'vm_image_offer' to be a str")
pulumi.set(__self__, "vm_image_offer", vm_image_offer)
if vm_image_publisher and not isinstance(vm_image_publisher, str):
raise TypeError("Expected argument 'vm_image_publisher' to be a str")
pulumi.set(__self__, "vm_image_publisher", vm_image_publisher)
if vm_image_sku and not isinstance(vm_image_sku, str):
raise TypeError("Expected argument 'vm_image_sku' to be a str")
pulumi.set(__self__, "vm_image_sku", vm_image_sku)
if vm_image_version and not isinstance(vm_image_version, str):
raise TypeError("Expected argument 'vm_image_version' to be a str")
pulumi.set(__self__, "vm_image_version", vm_image_version)
if vm_instance_count and not isinstance(vm_instance_count, int):
raise TypeError("Expected argument 'vm_instance_count' to be a int")
pulumi.set(__self__, "vm_instance_count", vm_instance_count)
if vm_secrets and not isinstance(vm_secrets, list):
raise TypeError("Expected argument 'vm_secrets' to be a list")
pulumi.set(__self__, "vm_secrets", vm_secrets)
if vm_size and not isinstance(vm_size, str):
raise TypeError("Expected argument 'vm_size' to be a str")
pulumi.set(__self__, "vm_size", vm_size)
@property
@pulumi.getter(name="applicationPorts")
def application_ports(self) -> Optional['outputs.EndpointRangeDescriptionResponse']:
"""
The range of ports from which cluster assigned port to Service Fabric applications.
"""
return pulumi.get(self, "application_ports")
@property
@pulumi.getter
def capacities(self) -> Optional[Mapping[str, str]]:
"""
The capacity tags applied to the nodes in the node type, the cluster resource manager uses these tags to understand how much resource a node has.
"""
return pulumi.get(self, "capacities")
@property
@pulumi.getter(name="dataDiskSizeGB")
def data_disk_size_gb(self) -> int:
"""
Disk size for each vm in the node type in GBs.
"""
return pulumi.get(self, "data_disk_size_gb")
@property
@pulumi.getter(name="ephemeralPorts")
def ephemeral_ports(self) -> Optional['outputs.EndpointRangeDescriptionResponse']:
"""
The range of ephemeral ports that nodes in this node type should be configured with.
"""
return pulumi.get(self, "ephemeral_ports")
@property
@pulumi.getter(name="isPrimary")
def is_primary(self) -> bool:
"""
The node type on which system services will run. Only one node type should be marked as primary. Primary node type cannot be deleted or changed for existing clusters.
"""
return pulumi.get(self, "is_primary")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="placementProperties")
def placement_properties(self) -> Optional[Mapping[str, str]]:
"""
The placement tags applied to nodes in the node type, which can be used to indicate where certain services (workload) should run.
"""
return pulumi.get(self, "placement_properties")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the managed cluster resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vmExtensions")
def vm_extensions(self) -> Optional[Sequence['outputs.VMSSExtensionResponse']]:
"""
Set of extensions that should be installed onto the virtual machines.
"""
return pulumi.get(self, "vm_extensions")
@property
@pulumi.getter(name="vmImageOffer")
def vm_image_offer(self) -> Optional[str]:
"""
The offer type of the Azure Virtual Machines Marketplace image. For example, UbuntuServer or WindowsServer.
"""
return pulumi.get(self, "vm_image_offer")
@property
@pulumi.getter(name="vmImagePublisher")
def vm_image_publisher(self) -> Optional[str]:
"""
The publisher of the Azure Virtual Machines Marketplace image. For example, Canonical or MicrosoftWindowsServer.
"""
return pulumi.get(self, "vm_image_publisher")
@property
@pulumi.getter(name="vmImageSku")
def vm_image_sku(self) -> Optional[str]:
"""
The SKU of the Azure Virtual Machines Marketplace image. For example, 14.04.0-LTS or 2012-R2-Datacenter.
"""
return pulumi.get(self, "vm_image_sku")
@property
@pulumi.getter(name="vmImageVersion")
def vm_image_version(self) -> Optional[str]:
"""
The version of the Azure Virtual Machines Marketplace image. A value of 'latest' can be specified to select the latest version of an image. If omitted, the default is 'latest'.
"""
return pulumi.get(self, "vm_image_version")
@property
@pulumi.getter(name="vmInstanceCount")
def vm_instance_count(self) -> int:
"""
The number of nodes in the node type.
"""
return pulumi.get(self, "vm_instance_count")
@property
@pulumi.getter(name="vmSecrets")
def vm_secrets(self) -> Optional[Sequence['outputs.VaultSecretGroupResponse']]:
"""
The secrets to install in the virtual machines.
"""
return pulumi.get(self, "vm_secrets")
@property
@pulumi.getter(name="vmSize")
def vm_size(self) -> Optional[str]:
"""
The size of virtual machines in the pool. All virtual machines in a pool are the same size. For example, Standard_D3.
"""
return pulumi.get(self, "vm_size")
class AwaitableGetNodeTypeResult(GetNodeTypeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNodeTypeResult(
application_ports=self.application_ports,
capacities=self.capacities,
data_disk_size_gb=self.data_disk_size_gb,
ephemeral_ports=self.ephemeral_ports,
is_primary=self.is_primary,
name=self.name,
placement_properties=self.placement_properties,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
vm_extensions=self.vm_extensions,
vm_image_offer=self.vm_image_offer,
vm_image_publisher=self.vm_image_publisher,
vm_image_sku=self.vm_image_sku,
vm_image_version=self.vm_image_version,
vm_instance_count=self.vm_instance_count,
vm_secrets=self.vm_secrets,
vm_size=self.vm_size)
def get_node_type(cluster_name: Optional[str] = None,
node_type_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNodeTypeResult:
"""
Use this data source to access information about an existing resource.
:param str cluster_name: The name of the cluster resource.
:param str node_type_name: The name of the node type.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['nodeTypeName'] = node_type_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicefabric/v20200101preview:getNodeType', __args__, opts=opts, typ=GetNodeTypeResult).value
return AwaitableGetNodeTypeResult(
application_ports=__ret__.application_ports,
capacities=__ret__.capacities,
data_disk_size_gb=__ret__.data_disk_size_gb,
ephemeral_ports=__ret__.ephemeral_ports,
is_primary=__ret__.is_primary,
name=__ret__.name,
placement_properties=__ret__.placement_properties,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
vm_extensions=__ret__.vm_extensions,
vm_image_offer=__ret__.vm_image_offer,
vm_image_publisher=__ret__.vm_image_publisher,
vm_image_sku=__ret__.vm_image_sku,
vm_image_version=__ret__.vm_image_version,
vm_instance_count=__ret__.vm_instance_count,
vm_secrets=__ret__.vm_secrets,
vm_size=__ret__.vm_size)
| 42.524138 | 379 | 0.674505 |
4a594674f143afda1e1604cf798d5b3ad76b7426 | 18 | py | Python | cadastro.py | EderBevacqua/DevOps | 4fd5f72c5362a8ab3845321fc898f43804fd3cd0 | ["Apache-2.0"] | 2 | 2021-02-10T21:28:26.000Z | 2021-08-18T19:52:28.000Z | cadastro.py | EderBevacqua/DevOps | 4fd5f72c5362a8ab3845321fc898f43804fd3cd0 | ["Apache-2.0"] | null | null | null | cadastro.py | EderBevacqua/DevOps | 4fd5f72c5362a8ab3845321fc898f43804fd3cd0 | ["Apache-2.0"] | null | null | null |
print("Olá mundo")
 | 18 | 18 | 0.722222 |
7757da58bf599d4ae7661c1076d3a285fbee6ef2 | 2,360 | py | Python | examples/teleop_example.py | chstetco/assistive-gym | cc797c86051ed4d09688d60ce9f378b884547264 | ["MIT"] | 216 | 2019-10-11T01:33:50.000Z | 2022-03-28T23:24:58.000Z | examples/teleop_example.py | chstetco/assistive-gym | cc797c86051ed4d09688d60ce9f378b884547264 | ["MIT"] | 19 | 2019-12-18T03:28:56.000Z | 2021-11-10T22:02:01.000Z | examples/teleop_example.py | chstetco/assistive-gym | cc797c86051ed4d09688d60ce9f378b884547264 | ["MIT"] | 62 | 2019-10-12T00:42:43.000Z | 2022-03-28T01:26:05.000Z |
import gym, assistive_gym
import pybullet as p
import numpy as np
env = gym.make('FeedingSawyer-v1')
env.render()
observation = env.reset()
# Map keys to position and orientation end effector movements
pos_keys_actions = {ord('j'): np.array([-0.01, 0, 0]), ord('l'): np.array([0.01, 0, 0]),
ord('u'): np.array([0, -0.01, 0]), ord('o'): np.array([0, 0.01, 0]),
ord('k'): np.array([0, 0, -0.01]), ord('i'): np.array([0, 0, 0.01])}
rpy_keys_actions = {ord('k'): np.array([-0.05, 0, 0]), ord('i'): np.array([0.05, 0, 0]),
ord('u'): np.array([0, -0.05, 0]), ord('o'): np.array([0, 0.05, 0]),
ord('j'): np.array([0, 0, -0.05]), ord('l'): np.array([0, 0, 0.05])}
start_pos, orient = env.robot.get_pos_orient(env.robot.right_end_effector)
start_rpy = env.get_euler(orient)
target_pos_offset = np.zeros(3)
target_rpy_offset = np.zeros(3)
while True:
keys = p.getKeyboardEvents()
# Process position movement keys ('u', 'i', 'o', 'j', 'k', 'l')
for key, action in pos_keys_actions.items():
if p.B3G_SHIFT not in keys and key in keys and keys[key] & p.KEY_IS_DOWN:
target_pos_offset += action
# Process rpy movement keys (shift + movement keys)
for key, action in rpy_keys_actions.items():
if p.B3G_SHIFT in keys and keys[p.B3G_SHIFT] & p.KEY_IS_DOWN and (key in keys and keys[key] & p.KEY_IS_DOWN):
target_rpy_offset += action
# print('Target position offset:', target_pos_offset, 'Target rpy offset:', target_rpy_offset)
target_pos = start_pos + target_pos_offset
target_rpy = start_rpy + target_rpy_offset
# Use inverse kinematics to compute the joint angles for the robot's arm
# so that its end effector moves to the target position.
target_joint_angles = env.robot.ik(env.robot.right_end_effector, target_pos, env.get_quaternion(target_rpy), env.robot.right_arm_ik_indices, max_iterations=200, use_current_as_rest=True)
# Get current joint angles of the robot's arm
current_joint_angles = env.robot.get_joint_angles(env.robot.right_arm_joint_indices)
# Compute the action as the difference between target and current joint angles.
action = (target_joint_angles - current_joint_angles) * 10
# Step the simulation forward
observation, reward, done, info = env.step(action)
| 50.212766 | 190 | 0.664831 |
613ae120ce50360d8de3b55e30df0d1f50c7d421 | 4,011 | py | Python | shaker/tests/test_agent.py | mail2nsrajesh/shaker | 610d47957481b8018a0fb38a47a634a9c7fbc6cc | [
"Apache-2.0"
] | null | null | null | shaker/tests/test_agent.py | mail2nsrajesh/shaker | 610d47957481b8018a0fb38a47a634a9c7fbc6cc | [
"Apache-2.0"
] | null | null | null | shaker/tests/test_agent.py | mail2nsrajesh/shaker | 610d47957481b8018a0fb38a47a634a9c7fbc6cc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from shaker.agent import agent
class TestAgent(testtools.TestCase):
@mock.patch('shaker.agent.agent.sleep')
def test_work_act_idle(self, mock_sleep):
agent_id = 'the-agent'
polling_interval = 10
agent_config = dict(polling_interval=polling_interval)
mock_socket = mock.Mock()
mock_socket.recv_json.side_effect = [{}]
agent.work_act(mock_socket, agent_id, agent_config)
mock_sleep.assert_called_once_with(polling_interval)
mock_socket.send_json.assert_called_once_with(
dict(operation='poll', agent_id=agent_id))
mock_socket.recv_json.assert_called_once_with()
@mock.patch('shaker.agent.agent.sleep')
@mock.patch('shaker.agent.agent.run_command')
def test_work_act_execute(self, mock_run_command, mock_sleep):
agent_id = 'the-agent'
polling_interval = 10
agent_config = dict(polling_interval=polling_interval)
mock_socket = mock.Mock()
mock_socket.recv_json.side_effect = [
dict(operation='execute'), dict(),
]
execute_result = {'res': 'data'}
mock_run_command.return_value = execute_result
agent.work_act(mock_socket, agent_id, agent_config)
mock_sleep.assert_called_once_with(polling_interval)
mock_socket.send_json.assert_has_calls([
mock.call(dict(operation='poll', agent_id=agent_id)),
mock.call(dict(operation='reply', agent_id=agent_id, res='data')),
])
@mock.patch('shaker.agent.agent.sleep')
@mock.patch('shaker.agent.agent.time_now')
@mock.patch('shaker.agent.agent.run_command')
def test_work_act_schedule(self, mock_run_command, mock_now, mock_sleep):
agent_id = 'the-agent'
polling_interval = 10
start_at = 1234
now = 1230
agent_config = dict(polling_interval=polling_interval)
mock_socket = mock.Mock()
mock_socket.recv_json.side_effect = [
dict(operation='execute', start_at=start_at), dict(),
]
execute_result = {'res': 'data'}
mock_run_command.return_value = execute_result
mock_now.return_value = now
agent.work_act(mock_socket, agent_id, agent_config)
mock_sleep.assert_has_calls([
mock.call(start_at - now),
mock.call(polling_interval),
])
mock_socket.send_json.assert_has_calls([
mock.call(dict(operation='poll', agent_id=agent_id)),
mock.call(dict(operation='reply', agent_id=agent_id, res='data')),
])
@mock.patch('shaker.agent.agent.sleep')
@mock.patch('shaker.agent.agent.run_command')
def test_work_act_configure(self, mock_run_command, mock_sleep):
agent_id = 'the-agent'
new_polling_interval = 2
agent_config = dict(polling_interval=10)
mock_socket = mock.Mock()
mock_socket.recv_json.side_effect = [
dict(operation='configure', polling_interval=new_polling_interval),
dict(),
]
agent.work_act(mock_socket, agent_id, agent_config)
mock_sleep.assert_has_calls([
mock.call(new_polling_interval),
])
mock_socket.send_json.assert_has_calls([
mock.call(dict(operation='poll', agent_id=agent_id)),
mock.call(dict(operation='reply', agent_id=agent_id)),
])
| 34.282051 | 79 | 0.670656 |
ce2981cdd5ab9a7b248f5afe572b170ecb7db9e1 | 414 | py | Python | CyberGuard_v2/secrets/secrets.py | CyberTriber/python | 3d9cffa4f6fc3a3348bc2ce8070c243f047927e1 | [
"MIT"
] | 1 | 2018-09-16T05:50:52.000Z | 2018-09-16T05:50:52.000Z | CyberGuard_v2/secrets/secrets.py | CyberTriber/python | 3d9cffa4f6fc3a3348bc2ce8070c243f047927e1 | [
"MIT"
] | null | null | null | CyberGuard_v2/secrets/secrets.py | CyberTriber/python | 3d9cffa4f6fc3a3348bc2ce8070c243f047927e1 | [
"MIT"
] | null | null | null | # coding=utf-8
# Firebase secrets
CONFIG = {
"apiKey": <firebase_api_key>,
"authDomain": "<project_name>.firebaseapp.com",
"databaseURL": "https://<project_name>.firebaseio.com",
"storageBucket": "<project_name>.appspot.com",
"serviceAccount": "./secrets/firebase.json"
}
FBEMAIL = '<name>@discord.app'
FBPASSWORD = <password>
# Discord secrets
BOT_TOKEN = <botTOKEN>
OWNER_ID = <bot_owner_discord_id> | 24.352941 | 57 | 0.714976 |
fb7f2ccee0a3ded2f34467d40e47bae199be3fce | 18,366 | py | Python | src/pyroofit/plotting.py | simonUU/PyrooFit | cacd0b7624a648276835c31e1a4b4d27284ef69f | [
"MIT"
] | 11 | 2018-09-06T11:37:10.000Z | 2021-08-25T07:15:38.000Z | src/pyroofit/plotting.py | simonUU/PyrooFit | cacd0b7624a648276835c31e1a4b4d27284ef69f | [
"MIT"
] | 7 | 2019-01-14T12:32:20.000Z | 2021-03-31T17:35:27.000Z | src/pyroofit/plotting.py | simonUU/PyrooFit | cacd0b7624a648276835c31e1a4b4d27284ef69f | [
"MIT"
] | 10 | 2018-10-22T13:53:22.000Z | 2022-01-24T11:56:17.000Z | # -*- coding: utf-8 -*-
""" Plot function for the PDF class
Tools to make nice plots from RooFit pdfs.
The function fast_plot is used by the PDF class
to make default plots.
Todo:
* Plotter class containing the RooFit frame
* Equal length ticks
* Provide matplolib functionality
"""
from __future__ import print_function
from .utilities import ClassLoggingMixin
import ROOT
DEFAULT_PALETTE = [1, ROOT.kRed - 7, ROOT.kAzure + 5, ROOT.kGreen-2, ROOT.kMagenta+1, ROOT.kYellow]
DEFAULT_STYLES = [0, 1001, 3004, 3005, 3009, 3006]
""" Default color pallette and draw style for ROOT.
"""
class Plotter(ClassLoggingMixin):
""" Experimental Plotter class
This function serves the purpose to create a RooFit frame without the need to interface
RooFit.
Todo:
* Adding plot pdf functionality
"""
def __init__(self, pdf, observable=None, nbins=20):
super(Plotter, self).__init__()
self.pdf = pdf
self.observable = observable if observable is not None else pdf.get_observable()
self.frame = None
        self.nbins = nbins
        self.create_frame()
def create_frame(self, title="Fit"):
self.frame = self.pdf.get_observable().frame(ROOT.RooFit.Title(title), ROOT.RooFit.Bins(self.nbins))
def get_optimal_bin_size(n, round=True):
"""Helper function to calculate optimal binning
This function calculates the optimal amount of bins for the number of events n.
Args:
n (int): number of events to be binned
        round (bool or int): round the result to a multiple of this base (True uses base 5)
Returns:
(int): Optimal number of bins
"""
def roundtobase(n, base=5):
diff = n % base
if diff <= base / 2.:
return n - diff
else:
return n - diff + base
n_opt = int(2 * n**(1/3.0))
if round:
base = 5
if isinstance(round, int):
base = round
n_opt = roundtobase(n_opt, base)
if n_opt == 0:
n_opt = 1
return n_opt
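# Added illustration (not part of the original module): for n = 1000 events the rule
# above gives int(2 * 1000**(1/3.0)) = 19, which is then snapped to the base-5 grid,
# so get_optimal_bin_size(1000) is expected to return 20.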
def round_to_1(x):
from math import log10, floor
return round(x, -int(floor(log10(abs(x)))))
def set_root_style(font_scale=1.0, label_scale=1.0):
""" Setting a general style that one can look at plots without getting eye-cancer.
Args:
font_scale (float): Scale of the fonts
label_scale (float): Scale of the labels
Todo:
* Absolute font size
"""
ROOT.gStyle.SetOptTitle(0)
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetLabelSize(0.04*label_scale, "xy")
ROOT.gStyle.SetLabelOffset(0.006, "y")
ROOT.gStyle.SetTitleSize(0.06*font_scale, "xy")
ROOT.gStyle.SetTitleOffset(0.9, "x")
ROOT.gStyle.SetTitleOffset(1.15, "y")
ROOT.gStyle.SetNdivisions(505, "x")
ROOT.gStyle.SetPadLeftMargin(0.14)
ROOT.gStyle.SetPadRightMargin(0.05)
ROOT.gStyle.SetPadBottomMargin(0.12)
ROOT.gStyle.SetPadTopMargin(0.05)
ROOT.gStyle.SetFillColor(10)
ROOT.gStyle.SetMarkerSize(0.8)
ROOT.gStyle.SetLineColor(ROOT.kBlack)
ROOT.gStyle.SetLineWidth(1)
ROOT.gStyle.SetLegendBorderSize(0)
def fast_plot(model, data, observable, filename, components=None, nbins=None, extra_info=None, size=1280,
average=True, pi_label=False, font_scale=1.0, label_scale=1.0,
legend=False, extra_text=None, round_bins=5, tick_len=30, model_range="Full",
color_cycle=DEFAULT_PALETTE, fill_cycle=DEFAULT_STYLES, lw=2, line_shade=0, legend_data_name="Data", legend_fit_name="Fit",
):
""" Generic plot function
Args:
model (RooAbsPDF):
Fit model to be drawn
data (RooDataSet):
Dataset to be plotted
observable (RooAbsVar):
Observable to be drawn
filename (str):
Name of the output file. Suffix determines file type
        components (list of tuples):
            (ROOT.RooAbsPDF, normalisation) pairs to be drawn separately
nbins (int):
Number of bins
        extra_info (list or TPaveText):
            Additional fit information to be drawn on the canvas (see the Todo note below)
lw (int):
Width of the line of the total fit model
size (int):
Plot size in pixels
average (bool):
Average bin content for calculating the pull distribution, if false, take central value
pi_label (bool):
Calculate the bin count in radians
font_scale (float):
Set relative font scale
label_scale (float):
            Set relative label scale
color_cycle (list of ROOT.TColor):
Overwrite for the default color cycle
fill_cycle (list of ROOT.TAttrFill):
Overwrite for the default fill cycle
line_shade (int):
Integer to add to the color cycle for the fill color
legend (list):
Vector with four coordinates for the TLegend position
extra_text (list of ROOT.TPaveText or ROOT.TPaveText):
Extra text to be drawn on the plot
        round_bins (int) :
            Base used when automatically rounding the number of bins
        tick_len (int) :
            Sets the tick length equally on both pads, choose between 0-100
legend_data_name (str):
Name of the data part in the fit plot
legend_fit_name (str):
name of the total fit in the plot
Todo:
* Change or remove extra_info
"""
set_root_style(font_scale, label_scale)
nbins = get_optimal_bin_size(data.numEntries(), round_bins) if nbins is None else nbins
if isinstance(data, ROOT.RooDataHist):
nbins = observable.getBins()
frame = observable.frame(ROOT.RooFit.Title("Fit Result"), ROOT.RooFit.Bins(nbins))
if isinstance(legend, list):
assert len(legend) == 4, "Please provide four coordinates for the legend"
leg = ROOT.TLegend(*legend)
elif legend == "top left":
leg = ROOT.TLegend(0.16, 0.78, 0.39, 0.92)
else:
leg = ROOT.TLegend(0.7, 0.78, 0.93, 0.92)
data.plotOn(frame, ROOT.RooFit.Name("Data"), ROOT.RooFit.DataError(ROOT.RooAbsData.SumW2))
leg.AddEntry(frame.findObject("Data"), legend_data_name, "LEP")
model.plotOn(frame, ROOT.RooFit.Name("Model"), ROOT.RooFit.LineColor(color_cycle[0]), ROOT.RooFit.Range(model_range))
leg.AddEntry(frame.findObject("Model"), legend_fit_name, "L")
if components is not None:
n_col = 1
for c, ni in components:
c.plotOn(frame,
ROOT.RooFit.LineColor(color_cycle[n_col] + line_shade),
ROOT.RooFit.Normalization(ni, 2),
ROOT.RooFit.FillColor(color_cycle[n_col]),
ROOT.RooFit.FillStyle(fill_cycle[n_col]),
ROOT.RooFit.Name(c.GetName()),
ROOT.RooFit.DrawOption("F"),
ROOT.RooFit.VLines(), # if your pdf happens to not end on a point=0 you have to add this - obviously
ROOT.RooFit.Range(model_range),
)
leg.AddEntry(frame.findObject(c.GetName()), c.getTitle().Data())
c.plotOn(frame,
ROOT.RooFit.LineColor(color_cycle[n_col] + line_shade),
ROOT.RooFit.Normalization(ni, 2),
ROOT.RooFit.FillColor(color_cycle[n_col]),
ROOT.RooFit.LineWidth(lw),
ROOT.RooFit.Range(model_range),
) # ROOT.RooFit.DrawOption("F")) #4050
n_col += 1
model.plotOn(frame, ROOT.RooFit.Name("Model"), ROOT.RooFit.LineColor(color_cycle[0]))
data.plotOn(frame, ROOT.RooFit.Name("Data"), ROOT.RooFit.DataError(ROOT.RooAbsData.SumW2))
# Create Canvas
canvas = ROOT.TCanvas("plot", "plot", size, size)
canvas.Divide(1, 2)
canvas.GetPad(1).SetPad(0.0, 0.25, 1, 1)
canvas.GetPad(1).SetBottomMargin(0.02)
canvas.GetPad(1).SetRightMargin(0.05)
canvas.GetPad(1).SetTicks(1, 1)
canvas.GetPad(2).SetPad(0.0, 0.0, 1, 0.25)
canvas.GetPad(2).SetBottomMargin(0.36)
canvas.GetPad(2).SetTopMargin(0.0)
canvas.GetPad(2).SetRightMargin(0.05)
canvas.GetPad(2).SetTicks(1, 1)
# Pi label because of...
if pi_label:
pifactor = 1 if observable.getMax() > 1.9 else 2
ylabel = "Events / ( %.2f #pi rad )" % (1.0 / float(pifactor * nbins))
frame.SetYTitle(ylabel)
else:
obs_range = round_to_1(observable.getMax() - observable.getMin()) # stupid overflow artefacts
div = round(nbins/obs_range)
# print(div,obs_range,numbins)
unit = observable.getUnit()
if unit is not None or unit is not "":
ylabel = "Events / ( %s / %d )" % (observable.getUnit(), div)
# frame.SetYTitle(ylabel)
# Draw All The Stuff
canvas.cd(1)
frame.Draw()
if legend is not False:
leg.Draw("same")
# Draw Pull
canvas.cd(2)
pulls = frame.pullHist("Data", "Model", average)
plot_pulls = observable.frame(ROOT.RooFit.Name("Pull_distribution"),
ROOT.RooFit.Title("Pull distribution"),
ROOT.RooFit.Range("full_range"))
hist_pulls = ROOT.TH1F("hist_pulls", "hist pulls", pulls.GetN(),
# pulls.GetXaxis().GetXmin(), pulls.GetXaxis().GetXmax())
observable.getMin(model_range), observable.getMax(model_range))
# hist_pulls = ROOT.TH1F("hist_pulls", "hist pulls", nbins,
# observable.getMin("full_range"), observable.getMax("full_range"))
pull_values = pulls.GetY()
xerr = (observable.getMax("full_range") - observable.getMin("full_range")) / (2. * nbins) # numbins
for i in range(pulls.GetN()):
hist_pulls.SetBinContent(i + 1, pull_values[i])
pulls.SetPointEXlow(i, xerr)
pulls.SetPointEXhigh(i, xerr)
pulls.SetPointEYlow(i, 0)
pulls.SetPointEYhigh(i, 0)
pulls.SetMarkerSize(0)
plot_pulls.addPlotable(pulls, "PE1")
# Messy
plot_pulls.GetYaxis().SetTitle("Pull")
plot_pulls.GetYaxis().CenterTitle()
plot_pulls.GetXaxis().SetTitleSize(0.18)
plot_pulls.GetYaxis().SetTitleSize(0.18)
plot_pulls.GetYaxis().SetTitleOffset(0.39)
plot_pulls.GetXaxis().SetTitleOffset(.82)
# plot_pulls.GetXaxis().SetTitleOffset(0.2)
plot_pulls.GetXaxis().SetLabelSize(0.12 * label_scale)
plot_pulls.GetYaxis().SetLabelSize(0.12 * label_scale)
# plot_pulls.GetYaxis().SetLabelOffset(0.0)
plot_pulls.GetYaxis().SetLabelOffset(0.006)
# plot_pulls.GetXaxis().SetLabelOffset(0.06)
plot_pulls.GetXaxis().SetTickLength(plot_pulls.GetXaxis().GetTickLength() * 3.0)
plot_pulls.GetYaxis().SetNdivisions(505)
### Equal sized ticks!!
pad1 = canvas.GetPad(1)
pad2 = canvas.GetPad(2)
pad1W = pad1.GetWw() * pad1.GetAbsWNDC()
pad1H = pad1.GetWh() * pad1.GetAbsHNDC()
pad2W = pad2.GetWw() * pad2.GetAbsWNDC()
pad2H = pad2.GetWh() * pad2.GetAbsHNDC()
# print(pad1W, pad1H, pad2W, pad2H)
frame.SetTickLength(tick_len/pad1W, "Y")
frame.SetTickLength(tick_len/pad1H, "X")
plot_pulls.SetTickLength(tick_len/pad1H, "Y")
plot_pulls.SetTickLength(tick_len/pad2H, "X")
frame.GetXaxis().SetLabelOffset(999)
frame.GetXaxis().SetLabelSize(0)
# set reasonable limits for the pull plots
if hist_pulls.GetMaximum() > 3.5 or hist_pulls.GetMinimum() < -3.5:
plot_pulls.SetMinimum(-5.5)
plot_pulls.SetMaximum(5.5)
else:
plot_pulls.SetMinimum(-3.5)
plot_pulls.SetMaximum(3.5)
plot_pulls.SetMarkerStyle(6)
plot_pulls.SetMarkerSize(0)
plot_pulls.SetMarkerColor(1) # This has to be the worst solution
plot_pulls.Draw("")
if model_range is "Full":
hist_pulls.SetFillColor(33)
hist_pulls.SetLineColor(33)
hist_pulls.Draw("HISTsame")
plot_pulls.Draw("Xsame")
print("ttttteeessdst")
line = ROOT.TLine(observable.getMin('Full'), 0, observable.getMax("Full"), 0)
line.SetLineColor(1)
line.SetLineStyle(2)
line.Draw("same")
if extra_text is not None:
canvas.cd(1)
if isinstance(extra_text, ROOT.TPaveText):
extra_info.Draw("Same")
if isinstance(extra_text, list):
for txt in extra_text:
assert isinstance(txt, ROOT.TPaveText), "Please provide extra_txt with a list or ROOT.TPaveText"
txt.Draw("Same")
if extra_info is not None:
canvas.cd(1)
if isinstance(extra_info, ROOT.TPaveText):
extra_info.Draw("Same")
else:
assert isinstance(extra_info, list), "Please provide extra_info with a list or ROOT.TPaveText"
box = ROOT.TPaveText(0.2, 0.75, 0.4, 0.9, "NDC")
box.SetFillColor(10)
box.SetBorderSize(0)
box.SetTextAlign(12)
box.SetTextSize(0.04)
box.SetFillStyle(1001)
box.SetFillColor(10)
for info in extra_info:
try:
if not isinstance(info, list):
if isinstance(info, ROOT.TPaveText):
info.Draw('same')
else:
info = [info]
if len(info) == 1:
box.AddText(info[0])
elif len(info) == 3:
box.AddText(info[0] + ' = %.2f #pm %.2f' % (info[1], info[2]))
else:
print("Could not add to legend ", info)
except IndexError:
print("Something went wrong in plotting")
box.Draw("same")
canvas.SaveAs(filename)
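# Minimal usage sketch for fast_plot (added example; `model`, `data`, `x`, `sig_pdf`,
# `bkg_pdf`, `n_sig` and `n_bkg` are assumed to exist and are not defined in this module):
#
#     fast_plot(model, data, x, "fit_result.pdf",
#               components=[(sig_pdf, n_sig), (bkg_pdf, n_bkg)],
#               nbins=40, legend=True)
#
# Each component is drawn with the next color/fill of the default cycles and the pull
# distribution is added automatically in the lower pad.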
def get_norm(nbins, N):
    """ Returns the normalisation that should be used for a PDF against a dataset of N entries, binned in nbins bins.
Args:
nbins(int): number of bins that data is binned in
N (int): number of entries in dataset
Returns:
norm (float): normalisation
"""
return N/nbins
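# Added example: a dataset with 1000 entries drawn in 50 bins gives
# get_norm(50, 1000) == 20, i.e. the expected number of entries per bin used to
# scale a unit-normalised PDF curve onto the histogram.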
def plot_as_pyplot(pdf, dataset, n_bins=50, dataset_name='Data', fit_name='Fit', x_name="data", y_name="Entries", unit="", figsize=None, hatches=None, fcs=None):
""" Plots the PDF against the dataset using matplotlib.pyplot libraries
Args:
pdf(pyroofit.pdf.PDF): the fitted pdf to plot against a dataset
dataset(array-like): the dataset used to train the PDF
n_bins(int): number of bins for the dataset
dataset_name (string): name of the dataset that will appear on the legend, default = "Data"
fit_name (string): name of the fit that will appear on the legend, default = "Fit"
x_name (string): title of the x axis, default = "data"
y_name (string): first part of y axis title, appears as y_name/(bin_width unit), default = "Entries"
unit (string): the unit to appear on x and y axes, default = ""
figsize (tuple of float,float): size of plot as (fig_width,fig_height), if None, then (13,8.03), default = None
hatches (list of hatch patterns): list for hatches to be used (filling patterns for components)
fcs (list of fill colors): list for facecolors to be used (fills for components)
Returns:
fig (Figure): matplotlib figure object
ax, ax_pull (array of axes.Axes): first axis contains the data/pdf plot, the second contains the pull distribution
"""
import matplotlib.pyplot as plt
import numpy as np
# Define golden ratio for sizes
golden = (1 + 5 ** 0.5) / 2
STYLES_facecolor = fcs if fcs else [None, 'none', 'none', 'none', 'none', 'none']
STYLES_hatches = hatches if hatches else [None, '///', r"\\\ ", 'xxx', '--', '++', 'o', ".+", 'xx', '//', '*', 'O', '.']
fig, (ax, ax_pull) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [golden**2,1], 'hspace':0.05}, figsize=figsize)
# Plot the dataset and figure out the normalisation
y, x = np.histogram(dataset,bins=n_bins)
err = (-0.5 + np.sqrt(np.array(y + 0.25)), +0.5 + np.sqrt(np.array(y + 0.25)))
bin_centers = (x[1:] + x[:-1]) / 2.0
ax.errorbar(bin_centers,y,err, color='black', label=dataset_name, fmt='o',markersize=2)
norm = get_norm(len(bin_centers),len(dataset))
# Plot total fit
ax.plot(*pdf.get_curve(norm=norm), color='black', label=fit_name)
# Plot separate contributions
curves = pdf.get_components_curve(norm=norm)
for count,c in enumerate(curves):
current_hatch = STYLES_hatches[count] if count<len(STYLES_hatches)-1 else 'none'
current_fc = STYLES_facecolor[count] if count<len(STYLES_facecolor)-1 else 'none'
current_color = next(ax._get_lines.prop_cycler)["color"]
ax.plot(*curves[c], color = current_color)
ax.fill_between(*curves[c], alpha=0.5,
hatch = current_hatch,
facecolor = current_fc,
edgecolor = current_color,
label=c)
# Calculate pull distribution
bin_hwidth = np.array([(bin_centers[1] - bin_centers[0])*0.5]*len(bin_centers))
pulls = -(np.interp(bin_centers,*pdf.get_curve(norm=norm)) - y) / (y)**0.5
#Draw pull distribution and color area under each
line,caps,_ = ax_pull.errorbar(bin_centers,pulls, xerr = bin_hwidth,
fmt='ko',
markersize=3,
ecolor = 'black')
ax_pull.bar(bin_centers,pulls,width=bin_hwidth*2,color='gray',alpha=0.5)
#Decorations, titles, ranges and names
# Plot legend
ax.legend(loc='best')
hfont = {'fontname':'sans-serif'}
#Setlimits
ax.set_xlim(min(dataset),max(dataset))
ax.set_ylim(0,)
ax_pull.set_xlim(min(dataset),max(dataset))
ylim = max(abs(min(pulls)*1.1),abs(max(pulls)*1.1))
ax_pull.set_ylim(-ylim,ylim)
#Set labels
ax.set_xticklabels([])
ax.set_ylabel(f'{y_name} / ( {bin_hwidth[0]:.1g} {unit})',**hfont)
ax_pull.set_ylabel('Pull',**hfont)
ax_pull.set_xlabel(f'{x_name}, {unit}',**hfont)
ax_pull.tick_params(which = 'both', top=True, right=True)
ax.tick_params(which = 'both',top=True, right=True)
fig.align_ylabels((ax,ax_pull))
return fig, (ax, ax_pull)
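# Minimal usage sketch (added example; `pdf` is assumed to be a fitted pyroofit PDF and
# `data` a 1-D array of the fitted observable -- neither is defined in this module):
#
#     fig, (ax, ax_pull) = plot_as_pyplot(pdf, data, n_bins=40,
#                                         x_name="M", unit="GeV/c^2")
#     fig.savefig("fit_pyplot.pdf")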
| 37.712526 | 170 | 0.617282 |
b184113847200f5b082e012a49155198ad3dc863 | 1,672 | py | Python | Python/shortest-distance-from-all-buildings.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | Python/shortest-distance-from-all-buildings.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | Python/shortest-distance-from-all-buildings.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | # Time: O(k * m * n), k is the number of the buildings
# Space: O(m * n)
class Solution(object):
def shortestDistance(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def bfs(grid, dists, cnts, x, y):
dist, m, n = 0, len(grid), len(grid[0])
visited = [[False for _ in xrange(n)] for _ in xrange(m)]
pre_level = [(x, y)]
visited[x][y] = True
while pre_level:
dist += 1
cur_level = []
for i, j in pre_level:
for dir in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
I, J = i+dir[0], j+dir[1]
if 0 <= I < m and 0 <= J < n and grid[I][J] == 0 and not visited[I][J]:
cnts[I][J] += 1
dists[I][J] += dist
cur_level.append((I, J))
visited[I][J] = True
pre_level = cur_level
m, n, cnt = len(grid), len(grid[0]), 0
dists = [[0 for _ in xrange(n)] for _ in xrange(m)]
cnts = [[0 for _ in xrange(n)] for _ in xrange(m)]
for i in xrange(m):
for j in xrange(n):
if grid[i][j] == 1:
cnt += 1
bfs(grid, dists, cnts, i, j)
shortest = float("inf")
for i in xrange(m):
for j in xrange(n):
if dists[i][j] < shortest and cnts[i][j] == cnt:
shortest = dists[i][j]
return shortest if shortest != float("inf") else -1
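# Added sanity check (not part of the original solution file):
#   Solution().shortestDistance([[1, 0, 2, 0, 1],
#                                [0, 0, 0, 0, 0],
#                                [0, 0, 1, 0, 0]])
# returns 7: the empty cell (1, 2) reaches the three buildings in 3 + 3 + 1 moves.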
| 34.833333 | 96 | 0.393541 |
37b78476aaa166f10b9221d6e31c2dcab4516a26 | 14,392 | py | Python | bin/install/resources/starcluster/plugins/automount.py | aguadev/aguadev | db22858faa425b7af2743d98c31dabef644e519c | [
"MIT"
] | 1 | 2022-01-26T14:09:30.000Z | 2022-01-26T14:09:30.000Z | bin/install/resources/starcluster/plugins/automount.py | aguadev/aguadev | db22858faa425b7af2743d98c31dabef644e519c | [
"MIT"
] | null | null | null | bin/install/resources/starcluster/plugins/automount.py | aguadev/aguadev | db22858faa425b7af2743d98c31dabef644e519c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import re
import string
import sys
import time
import posixpath
import subprocess
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class NfsShares (ClusterSetup):
"""
Automatically mounts external NFS shares on StarCluster nodes
"""
def __init__(self, privatekey, publiccert, interval, sourcedirs, mountpoints, portmapport, nfsport, mountdport, cluster):
log.info("Loaded plugin: automount.NfsShares")
log.debug("automount.NfsShares.__init__ Initialising AutoMount plugin.")
log.debug("automount.NfsShares.__init__ privatekey %s" % privatekey)
log.debug("automount.NfsShares.__init__ publiccert %s" % publiccert)
log.debug("automount.NfsShares.__init__ interval %s" % interval)
log.debug("automount.NfsShares.__init__ sourcedirs %s" % sourcedirs)
log.debug("automount.NfsShares.__init__ mountpoints %s" % mountpoints)
log.debug("automount.NfsShares.__init__ portmapport %s" % portmapport)
log.debug("automount.NfsShares.__init__ nfsport %s" % nfsport)
log.debug("automount.NfsShares.__init__ mountdport %s" % mountdport)
log.debug("automount.NfsShares.__init__ cluster %s" % cluster)
self.privatekey = privatekey
self.publiccert = publiccert
self.portmapport = portmapport
self.nfsport = nfsport
self.mountdport = mountdport
self.cluster = cluster
# set default interval
if not interval: interval = 10
self.interval = interval
self.sourcedirs = sourcedirs.split(",")
self.mountpoints = mountpoints.split(",")
if len(self.sourcedirs) != len(self.mountpoints):
log.info("automount.NfsShares.__init__ length of sourcedirs ("
+ len(self.sourcedirs)
+ ") is not the same as length of mountpoints ("
+ len(self.mountpoints)
+ ")"
)
sys.exit(0)
def run(self, nodes, master, user, user_shell, volumes):
"""
Mount NFS shares on master and all nodes
"""
log.info("Running plugin automount")
log.debug("automount.NfsShares.run automount.NfsShares.run(nodes, master, user, user_shell, volumes)")
#### OPEN NFS-RELATED PORTS FOR THIS CLUSTER
self.openNfsPorts("default")
self.openNfsPorts('@sc-' + self.cluster)
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
#### FIX mountd PORT ON head AND MASTER/NODES
mountdport = "32767"
for node in nodes:
self.setMountdOnNode(node, mountdport)
self.setMountdOnHead(mountdport)
self.restartServicesOnHead()
#### MOUNT ON ALL NODES
for node in nodes:
self.mount(node)
log.info("Completed plugin automount")
def openNfsPorts(self, group):
"""
Open (fixed) NFS-related ports (portmap, nfs and mountd)
"""
portmapport = self.portmapport
nfsport = self.nfsport
mountdport = self.mountdport
log.info("Opening NFS-related ports for group: %s", group)
log.debug("automount.openNfsPorts group; %s", group)
log.debug("automount.openNfsPorts portmapport; %s", portmapport)
log.debug("automount.openNfsPorts nfsport; %s", nfsport)
log.debug("automount.openNfsPorts mountdport; %s", mountdport)
permissions = [
dict(group=group, port=nfsport, type="tcp"),
dict(group=group, port=nfsport, type="udp"),
dict(group=group, port=portmapport, type="tcp"),
dict(group=group, port=portmapport, type="udp"),
dict(group=group, port=mountdport, type="tcp"),
dict(group=group, port=mountdport, type="udp")
]
#### OPEN PORTS FROM HEAD NODE (NO SSH FROM MASTER)
commands = self.setPortCommands(group, permissions)
for command in commands:
self.runSystemCommand(command);
def setPortCommands(self, group, permissions):
groupPermissions = self.getGroupPermissions(group)
log.debug("automount.NfsShares.setPortCommands groupPermissions: %s", groupPermissions)
#### FILTER OUT EXISTING PERMISSIONS
permissions = self.filterPermissions(permissions, groupPermissions)
#### SET EC2 KEY FILE ENVIRONMENT VARIABLES
ec2vars = self.getEC2Vars()
commands = []
for permission in permissions:
command = ec2vars + 'ec2-authorize ' + permission['group'] + ' -p ' + permission['port'] + ' -P ' + permission['type']
commands.append(command)
return commands
def getGroupPermissions(self, group):
ec2vars = self.getEC2Vars()
ec2dgrp = self.runSystemCommand(ec2vars + 'ec2dgrp ' + group)
permissions = ec2dgrp.split("\n")
permissions[:1] = []
return permissions
def filterPermissions(self, permissions, groupPermissions):
log.info("Filtering to exclude existing permissions")
missing = []
for i, v in enumerate(permissions):
found = 0
for index, value in enumerate(groupPermissions):
if value == '': break
elements = value.split("\t")
type = elements[4]
port = elements[5]
if type == '' or value == '': break
if type == v['type'] and port == v['port']:
found = 1
break
if found == 0:
missing.append(v)
return missing
def getEC2Vars(self):
ec2vars = "export EC2_PRIVATE_KEY=" + self.privatekey + "; "
ec2vars += "export EC2_CERT=" + self.publiccert + "; "
return ec2vars
def runSystemCommand(self, command):
log.debug(command)
return subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).stdout.read()
def getHeadIp(self):
log.info("automount.NfsShares.getHeadIp Getting headnode internal IP")
p = os.popen('curl -s http://169.254.169.254/latest/meta-data/instance-id');
instanceid = p.read()
log.debug("automount.NfsShares.getHeadIp instanceid: %s" % instanceid)
command = "ec2-describe-instances -K " + self.privatekey \
+ " -C " + self.publiccert \
+ " " + instanceid
log.debug("automount.NfsShares.getHeadIp command: %s" % command)
p = os.popen(command);
reservation = p.read()
log.debug("automount.NfsShares.getHeadIp reservation: %s" % reservation)
instance = reservation.split("INSTANCE")[1];
log.debug("automount.NfsShares.getHeadIp instance: %s" % instance)
instanceRow = instance.split('\t')
self.head_ip = instanceRow[17]
log.debug("automount.NfsShares.getHeadIp self.head_ip: %s" % self.head_ip)
def mount(self, node):
"""
Mount shares from head node on master and exec nodes
"""
log.info("Mounting shared from head node to %s", node.alias)
log.debug("automount.NfsShares.mount node.private_dns_name: %s" % node.private_dns_name)
log.debug("automount.NfsShares.mount self.head_ip: %s" % self.head_ip)
#### INSERT MOUNT POINT ENTRIES INTO /etc/fstab ON NODE
log.debug("automount.NfsShares.on_add_node Doing self._addToFstab")
for i in range(len(self.sourcedirs)):
self._addToFstab(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
#### INSERT ENTRIES FOR MASTER/NODES INTO /etc/exports ON HEAD NODE
log.debug("automount.NfsShares.mount Doing self._addToExports")
for i in range(len(self.sourcedirs)):
self._addToExports(node, self.sourcedirs[i])
#### MOUNT THE SHARES
for i in range(len(self.sourcedirs)):
self.mountShares(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
def _addToFstab(self, node, sourcedir, sourceip, mountpoint, interval):
"""
Add entries to /etc/fstab on master/exec nodes
"""
log.info("Adding /etc/fstab entry (%s on %s)", mountpoint, node.alias)
insert = self.head_ip + ":" + sourcedir + " " + mountpoint + " nfs nfsvers=3,defaults 0 0"
cmd = "echo '" + insert + "' >> /etc/fstab ;"
log.debug(cmd)
node.ssh.execute(cmd)
def _addToExports(self, node, sourcedir):
"""
Add entries to /etc/exports on head node
"""
log.info("Adding /etc/exports entry (%s to %s)", sourcedir, node.alias)
insert = sourcedir + " " + node.private_ip_address + "(async,no_root_squash,no_subtree_check,rw)"
f = open("/etc/exports", 'rb')
contents = f.read()
f.close()
insert = sourcedir + " " + node.private_ip_address + "(async,no_root_squash,no_subtree_check,rw)\n"
contents = string.replace(contents, insert,"")
contents += insert
f = open("/etc/exports", 'w')
f.write(contents)
f.close()
os.system("exportfs -ra")
os.system("service portmap restart")
os.system("service nfs restart")
def _removeFromExports(self, node, sourcedir):
"""
Remove entries from /etc/exports on head node
"""
log.info("Removing from /etc/exports entry (%s to %s)", sourcedir, node.alias)
f = open("/etc/exports", 'rb')
contents = f.read()
f.close()
insert = sourcedir + " " + node.private_ip_address + "(async,no_root_squash,no_subtree_check,rw)\n"
contents = string.replace(contents, insert,"")
f = open("/etc/exports", 'w')
f.write(contents)
f.close()
def setMountdOnNode(self, node, mountdport):
"""
Fix mountd port to same number on all hosts - head, master and exec nodes
"""
log.info("Setting mountd port on %s", node.alias)
cmd = self.mountdCommand(mountdport)
log.debug("Doing node.ssh.execute: " + cmd)
node.ssh.execute(cmd)
def setMountdOnHead(self, mountdport):
cmd = self.mountdCommand(mountdport)
log.debug("Doing os.system: " + cmd)
os.system(cmd)
def restartServicesOnNode(self, node):
node.ssh.execute("service portmap restart")
node.ssh.execute("service nfs restart")
def restartServicesOnHead(self):
os.system("service portmap restart")
os.system("service nfs restart")
def mountdCommand(self, mountdport):
"""
LATER: DETERMINE COMMAND USING uname -a ON NODE
E.G.: centos
nfsconfig = "/etc/sysconfig/nfs"
insert = "MOUNTD_PORT=" + mountdport
"""
#### ubuntu
nfsconfig = "/etc/default/nfs-kernel-server"
insert = "RPCMOUNTDOPTS=\"--port " + mountdport + " --manage-gids\""
return "echo '" + insert + "' >> " + nfsconfig + ";"
def mountShares(self, node, sourcedir, sourceip, mountpoint, interval):
"""
Mount the shares on the local filesystem - wait <interval> seconds between tries
"""
log.info("Mounting NFS shares on %s", node.alias)
cmd = "mount -t nfs " + sourceip + ":" + sourcedir + " " + mountpoint
log.info(cmd)
if not node.ssh.isdir(mountpoint): node.ssh.makedirs(mountpoint)
# TRY REPEATEDLY TO MOUNT
file_list = []
while not file_list:
log.debug("automount.NfsShares.mountShares cmd: %s" % cmd)
node.ssh.execute(cmd)
file_list = node.ssh.ls(mountpoint)
if file_list: break
log.debug("Sleeping %s seconds" % interval)
time.sleep(float(interval))
def on_add_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing 'on_add_node' for plugin: automount.NfsShares");
log.info("Adding node %s", node.alias)
log.debug("automount.NfsShares.on_add_node ")
log.debug("automount.NfsShares.on_add_node node.private_dns_name: %s" % node.private_dns_name)
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
#### INSERT MOUNT POINT ENTRIES INTO /etc/fstab ON NODE
log.debug("automount.NfsShares.on_add_node Doing self._addToFstab")
for i in range(len(self.sourcedirs)):
self._addToFstab(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
#### INSERT EXPORT ENTRIES FOR NODE INTO /etc/exports ON HEAD NODE
log.debug("automount.NfsShares.on_add_node Doing self._addToExports")
for i in range(len(self.sourcedirs)):
self._addToExports(node, self.sourcedirs[i])
#### FIX mountd PORT ON head AND MASTER/
mountdport = "32767"
self.setMountdOnNode(node, mountdport)
self.setMountdOnHead(mountdport)
self.restartServicesOnHead()
#### MOUNT THE SHARES
for i in range(len(self.sourcedirs)):
self.mountShares(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
log.info("Completed 'on_add_node' for plugin: automount.NfsShares");
def on_remove_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing on_remove_node for plugin: automount.NfsShares")
log.info("Removing %s " % node.alias)
log.debug("automount.NfsShares.on_remove_node Removing %s from cluster" % node.alias)
log.debug("automount.NfsShares.on_remove_node node.private_dns_name: %s" % node.private_dns_name)
# REMOVE ENTRIES FROM /etc/exports ON HEAD NODE
for i in range(len(self.sourcedirs)):
self._removeFromExports(node, self.sourcedirs[i])
# RESTART NFS ON HEAD
log.info("automount.NfsShares.on_remove_node Restarting NFS on head node")
os.system("service portmap restart")
os.system("service nfs restart")
| 41.356322 | 130 | 0.603877 |
f8aa6d581c25bb06744dd4e43854b79ed16922a3 | 14,852 | py | Python | doit_doc_template/core/yaml.py | i386x/abcdoc | 04246ce33a480ce3039832db80d0f5c86a86ff54 | [
"MIT"
] | null | null | null | doit_doc_template/core/yaml.py | i386x/abcdoc | 04246ce33a480ce3039832db80d0f5c86a86ff54 | [
"MIT"
] | null | null | null | doit_doc_template/core/yaml.py | i386x/abcdoc | 04246ce33a480ce3039832db80d0f5c86a86ff54 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#! \file ~/doit_doc_template/core/yaml.py
#! \author Jiří Kučera, <sanczes AT gmail.com>
#! \stamp 2019-05-18 16:03:58 +0200
#! \project DoIt! Doc: Sphinx Extension for DoIt! Documentation
#! \license MIT
#! \version See doit_doc_template.__version__
#! \brief See __doc__
#
"""\
Loading YAML files.\
"""
__license__ = """\
Copyright (c) 2014 - 2019 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
import datetime
from yaml import MappingNode, MarkedYAMLError
from yaml.composer import Composer
from yaml.constructor import ConstructorError, SafeConstructor
from yaml.parser import Parser
from yaml.reader import Reader
from yaml.resolver import Resolver
from yaml.scanner import Scanner
def load(stream, loadercls, **kwargs):
"""
"""
loader = loadercls(stream, **kwargs)
try:
return loader.get_single_data()
finally:
loader.dispose()
#-def
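# Illustrative use of `load` with the customised loader defined below (added example;
# "template.yaml" is a made-up file name):
#
#     with open("template.yaml") as stream:
#         data = load(stream, YamlLoader, filename="template.yaml")
#     # every returned value is a Yaml* wrapper carrying a `.mark` with the
#     # file/line position, usable for error reporting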
class YamlObject(object):
"""
"""
def __init__(self):
"""
"""
self.mark = None
#-def
def wrap(self, value):
"""
"""
if isinstance(value, bool):
data = YamlBool(value)
elif isinstance(value, int):
data = YamlInt(value)
elif isinstance(value, float):
data = YamlFloat(value)
elif isinstance(value, bytes):
data = YamlBytes(value)
elif isinstance(value, datetime.date):
data = YamlDate(value.year, value.month, value.day)
elif isinstance(value, datetime.datetime):
data = YamlDateTime(
value.year, value.month, value.day,
value.hour, value.minute, value.second,
value.microsecond
)
elif isinstance(value, str):
data = YamlStr(value)
elif isinstance(value, list):
data = YamlList(value)
elif isinstance(value, dict):
data = YamlDict(value)
else:
raise MarkedYAMLError(
"while wrapping value", self.mark,
"invalid type of value to wrap ('{}')".format(
type(value).__name__
),
self.mark
)
data.mark = self.mark
return data
#-def
#-class
class YamlBool(YamlObject):
"""
"""
__slots__ = ["__value"]
def __init__(self, value):
"""
"""
YamlObject.__init__(self)
self.__value = value
#-def
def __bool__(self):
"""
"""
return self.__value
#-def
#-class
class YamlInt(int, YamlObject):
"""
"""
__slots__ = []
def __new__(cls, *args, **kwargs):
"""
"""
return super(YamlInt, cls).__new__(cls, *args, **kwargs)
#-def
def __init__(self, *args, **kwargs):
"""
"""
int.__init__(self)
YamlObject.__init__(self)
#-def
#-class
class YamlFloat(float, YamlObject):
"""
"""
__slots__ = []
def __new__(cls, *args, **kwargs):
"""
"""
return super(YamlFloat, cls).__new__(cls, *args, **kwargs)
#-def
def __init__(self, *args, **kwargs):
"""
"""
float.__init__(self)
YamlObject.__init__(self)
#-def
#-class
class YamlBytes(bytes, YamlObject):
"""
"""
__slots__ = []
def __new__(cls, *args, **kwargs):
"""
"""
return super(YamlBytes, cls).__new__(cls, *args, **kwargs)
#-def
def __init__(self, *args, **kwargs):
"""
"""
bytes.__init__(self)
YamlObject.__init__(self)
#-def
#-class
class YamlDate(datetime.date, YamlObject):
"""
"""
__slots__ = []
def __init__(self, *args, **kwargs):
"""
"""
datetime.date.__init__(self, *args, **kwargs)
YamlObject.__init__(self)
#-def
#-class
class YamlDateTime(datetime.datetime, YamlObject):
"""
"""
__slots__ = []
def __init__(self, *args, **kwargs):
"""
"""
datetime.datetime.__init__(self, *args, **kwargs)
YamlObject.__init__(self)
#-def
#-class
class YamlStr(str, YamlObject):
"""
"""
__slots__ = []
def __new__(cls, *args, **kwargs):
"""
"""
return super(YamlStr, cls).__new__(cls, *args, **kwargs)
#-def
def __init__(self, *args, **kwargs):
"""
"""
str.__init__(self)
YamlObject.__init__(self)
#-def
#-class
class YamlList(list, YamlObject):
"""
"""
__slots__ = []
def __init__(self, *args, **kwargs):
"""
"""
list.__init__(self, *args, **kwargs)
YamlObject.__init__(self)
#-def
#-class
class YamlDict(dict, YamlObject):
"""
"""
__slots__ = []
def __init__(self, *args, **kwargs):
"""
"""
dict.__init__(self, *args, **kwargs)
YamlObject.__init__(self)
#-def
#-class
class YamlConstructor(SafeConstructor):
"""
"""
__slots__ = ["__filename"]
def __init__(self, filename=None):
"""
"""
SafeConstructor.__init__(self)
self.__filename = filename
#-def
def construct_yaml_null(self, node):
"""
"""
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
raise ConstructorError(
"while constructing", mark,
"null values are forbidden by the customized YAML loader", mark
)
#-def
def construct_yaml_bool(self, node):
"""
"""
data = YamlBool(SafeConstructor.construct_yaml_bool(self, node))
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
return data
#-def
def construct_yaml_int(self, node):
"""
"""
data = YamlInt(SafeConstructor.construct_yaml_int(self, node))
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
return data
#-def
def construct_yaml_float(self, node):
"""
"""
data = YamlFloat(SafeConstructor.construct_yaml_float(self, node))
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
return data
#-def
def construct_yaml_binary(self, node):
"""
"""
data = YamlBytes(SafeConstructor.construct_yaml_binary(self, node))
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
return data
#-def
def construct_yaml_timestamp(self, node):
"""
"""
timestamp = SafeConstructor.construct_yaml_timestamp(self, node)
if isinstance(timestamp, datetime.date):
data = YamlDate(timestamp.year, timestamp.month, timestamp.day)
else:
data = YamlDateTime(
timestamp.year, timestamp.month, timestamp.day,
timestamp.hour, timestamp.minute, timestamp.second,
timestamp.microsecond
)
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
return data
#-def
def construct_yaml_omap(self, node):
"""
"""
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
raise ConstructorError(
"while constructing", mark,
"ordered maps are not supported by the customized YAML loader,"
" please use an ordinary map", mark
)
#-def
def construct_yaml_pairs(self, node):
"""
"""
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
raise ConstructorError(
"while constructing", mark,
"pairs are not supported by the customized YAML loader,"
" please use a list of maps", mark
)
#-def
def construct_yaml_set(self, node):
"""
"""
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
raise ConstructorError(
"while constructing", mark,
"sets are not supported by the customized YAML loader,"
" please use an ordinary map", mark
)
#-def
def construct_yaml_str(self, node):
"""
"""
data = YamlStr(SafeConstructor.construct_yaml_str(self, node))
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
return data
#-def
def construct_yaml_seq(self, node):
"""
"""
data = YamlList([])
yield data
data.extend(self.construct_sequence(node))
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
#-def
def construct_yaml_map(self, node):
"""
"""
data = YamlDict({})
yield data
data.update(self.construct_mapping(node))
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
data.mark = mark
#-def
def construct_yaml_object(self, node):
"""
"""
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
raise ConstructorError(
"while constructing", mark,
"constructing arbitrary objects is not supported by the customized"
" YAML loader", mark
)
#-def
def construct_mapping(self, node, deep=False):
"""
"""
mark = node.start_mark
if self.__filename is not None:
mark.name = self.__filename
if not isinstance(node, MappingNode):
raise ConstructorError(
None, None,
"expected a mapping node, but found {}".format(node.id), mark
)
self.flatten_mapping(node)
mapping = YamlDict({})
mapping.mark = mark
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise ConstructorError(
"while constructing a mapping", mark,
"found unacceptable key ({})".format(str(exc)),
key_node.start_mark
)
if key in mapping:
raise ConstructorError(
"while constructing a mapping", mark,
"found a duplicate dict key ({})".format(str(key)),
key_node.start_mark
)
mapping[key] = self.construct_object(value_node, deep=deep)
return mapping
#-def
#-class
YamlConstructor.add_constructor(
"tag:yaml.org,2002:null",
YamlConstructor.construct_yaml_null
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/none",
YamlConstructor.construct_yaml_null
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:bool",
YamlConstructor.construct_yaml_bool
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/bool",
YamlConstructor.construct_yaml_bool
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:int",
YamlConstructor.construct_yaml_int
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/int",
YamlConstructor.construct_yaml_int
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:float",
YamlConstructor.construct_yaml_float
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/float",
YamlConstructor.construct_yaml_float
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:binary",
YamlConstructor.construct_yaml_binary
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:timestamp",
YamlConstructor.construct_yaml_timestamp
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:omap",
YamlConstructor.construct_yaml_omap
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:pairs",
YamlConstructor.construct_yaml_pairs
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:set",
YamlConstructor.construct_yaml_set
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:str",
YamlConstructor.construct_yaml_str
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/str",
YamlConstructor.construct_yaml_str
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/unicode",
YamlConstructor.construct_yaml_str
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:seq",
YamlConstructor.construct_yaml_seq
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/list",
YamlConstructor.construct_yaml_seq
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:map",
YamlConstructor.construct_yaml_map
)
YamlConstructor.add_constructor(
"tag:yaml.org,2002:python/dict",
YamlConstructor.construct_yaml_map
)
class YamlLoader(Reader, Scanner, Parser, Composer, YamlConstructor, Resolver):
"""
"""
__slots__ = []
def __init__(self, stream, filename=None):
"""
"""
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
YamlConstructor.__init__(self, filename)
Resolver.__init__(self)
#-def
#-class
| 25.301533 | 79 | 0.5946 |
0e35cec481bf2319eb9437badeca860361bf0255 | 40,474 | py | Python | celery/concurrency/processes/pool.py | cangove/celery | 20be2ef744efc52d1e11ef7dc3ae0edd53dcb8f9 | [
"BSD-3-Clause"
] | 1 | 2016-08-19T19:42:22.000Z | 2016-08-19T19:42:22.000Z | celery/concurrency/processes/pool.py | cangove/celery | 20be2ef744efc52d1e11ef7dc3ae0edd53dcb8f9 | [
"BSD-3-Clause"
] | null | null | null | celery/concurrency/processes/pool.py | cangove/celery | 20be2ef744efc52d1e11ef7dc3ae0edd53dcb8f9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
from __future__ import absolute_import
#
# Imports
#
import collections
import errno
import itertools
import logging
import os
import signal
import sys
import threading
import time
import Queue
import warnings
from multiprocessing import cpu_count, TimeoutError, Event
from multiprocessing import util
from multiprocessing.util import Finalize, debug
from celery.datastructures import ExceptionInfo
from celery.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded
from celery.exceptions import WorkerLostError
from .process import Process
_Semaphore = threading._Semaphore
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
return map(*args)
def error(msg, *args, **kwargs):
if util._logger:
util._logger.error(msg, *args, **kwargs)
def safe_apply_callback(fun, *args):
if fun:
try:
fun(*args)
except BaseException, exc:
error("Pool callback raised exception: %r", exc,
exc_info=sys.exc_info())
class LaxBoundedSemaphore(threading._Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
if sys.version_info >= (3, 0):
def release(self):
if self._value < self._initial_value:
_Semaphore.release(self)
if __debug__:
self._note("%s.release: success, value=%s (unchanged)" % (
self, self._value))
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def release(self): # noqa
if self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
if __debug__:
self._note("%s.release: success, value=%s (unchanged)" % (
self, self._Semaphore__value))
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
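# Illustrative behaviour of LaxBoundedSemaphore (added example, not from the original file):
#   sem = LaxBoundedSemaphore(2)
#   sem.release()                  # ignored: value already at its initial bound of 2
#   sem.acquire(); sem.release()   # value drops to 1 and is restored to 2 as usual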
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps unpickleable object."""
def __init__(self, exc, value):
self.exc = str(exc)
self.value = repr(value)
Exception.__init__(self, self.exc, self.value)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
def worker(inqueue, outqueue, initializer=None, initargs=(),
maxtasks=None, sentinel=None):
# Re-init logging system.
# Workaround for http://bugs.python.org/issue6721#msg140215
# Python logging module uses RLock() objects which are broken after
# fork. This can result in a deadlock (Issue #496).
logger_names = logging.Logger.manager.loggerDict.keys()
logger_names.append(None) # for root logger
for name in logger_names:
for handler in logging.getLogger(name).handlers:
handler.createLock()
logging._lock = threading.RLock()
pid = os.getpid()
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_reader'):
def poll(timeout):
if inqueue._reader.poll(timeout):
return True, get()
return False, None
else:
def poll(timeout): # noqa
try:
return True, get(timeout=timeout)
except Queue.Empty:
return False, None
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
if sentinel is not None and sentinel.is_set():
debug('worker got sentinel -- exiting')
break
try:
ready, task = poll(1.0)
if not ready:
continue
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
put((ACK, (job, i, time.time(), pid)))
try:
result = (True, func(*args, **kwds))
except Exception:
result = (False, ExceptionInfo(sys.exc_info()))
try:
put((READY, (job, i, result)))
except Exception, exc:
_, _, tb = sys.exc_info()
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((MaybeEncodingError, wrapped, tb))
put((READY, (job, i, (False, einfo))))
completed += 1
debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class PoolThread(threading.Thread):
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
self._state = RUN
self.daemon = True
def run(self):
try:
return self.body()
except Exception, exc:
error("Thread %r crashed: %r" % (self.__class__.__name__, exc, ),
exc_info=sys.exc_info())
os._exit(1)
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
while self._state == RUN and self.pool._state == RUN:
self.pool._maintain_pool()
time.sleep(0.8)
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
super(TaskHandler, self).__init__()
def body(self):
taskqueue = self.taskqueue
outqueue = self.outqueue
put = self.put
pool = self.pool
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
super(TimeoutHandler, self).__init__()
def body(self):
processes = self.processes
cache = self.cache
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
def _process_by_pid(pid):
for index, process in enumerate(processes):
if process.pid == pid:
return process, index
return None, None
def _timed_out(start, timeout):
if not start or not timeout:
return False
if time.time() >= start + timeout:
return True
def _on_soft_timeout(job, i, soft_timeout):
debug('soft time limit exceeded for %i' % i)
process, _index = _process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
if job._timeout_callback is not None:
job._timeout_callback(soft=True, timeout=soft_timeout)
try:
os.kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError, exc:
if exc.errno == errno.ESRCH:
pass
else:
raise
dirty.add(i)
def _on_hard_timeout(job, i, hard_timeout):
if job.ready():
return
debug('hard time limit exceeded for %i', i)
# Remove from cache and set return value to an exception
exc_info = None
try:
raise TimeLimitExceeded(hard_timeout)
except TimeLimitExceeded:
exc_info = sys.exc_info()
job._set(i, (False, ExceptionInfo(exc_info)))
# Remove from _pool
process, _index = _process_by_pid(job._worker_pid)
# Run timeout callback
if job._timeout_callback is not None:
job._timeout_callback(soft=False, timeout=hard_timeout)
if process:
process.terminate()
# Inner-loop
while self._state == RUN:
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in cache.items():
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
_on_hard_timeout(job, i, hard_timeout)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
_on_soft_timeout(job, i, soft_timeout)
time.sleep(0.5) # Don't waste CPU cycles.
debug('timeout handler exiting')
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
super(ResultHandler, self).__init__()
def body(self):
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
putlock = self.putlock
def on_ack(job, i, time_accepted, pid):
try:
cache[job]._ack(i, time_accepted, pid)
except (KeyError, AttributeError):
# Object gone or doesn't support _ack (e.g. IMAPIterator).
pass
def on_ready(job, i, obj):
try:
item = cache[job]
except KeyError:
return
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
state_handlers = {ACK: on_ack, READY: on_ready}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)" % (state, args))
debug('result handler starting')
while 1:
try:
ready, task = poll(1.0)
except (IOError, EOFError), exc:
debug('result handler got %r -- exiting' % (exc, ))
return
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if ready:
if task is None:
debug('result handler got sentinel')
break
on_state_change(task)
time_terminate = None
while cache and self._state != TERMINATE:
try:
ready, task = poll(1.0)
except (IOError, EOFError), exc:
debug('result handler got %r -- exiting' % (exc, ))
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = time.time()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss' % (
abs(min(now - time_terminate - 5.0, 0))))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of the `apply()` builtin
'''
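    # Illustrative usage sketch (an assumption, not taken from this module;
    # `double` is a hypothetical picklable callable):
    #
    #   pool = Pool(processes=4, soft_timeout=30, timeout=60)
    #   result = pool.apply_async(double, (21,))
    #   result.get()        # -> 42 once a worker has run the job
    #   pool.close()
    #   pool.join()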
Process = Process
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None,
force_execv=False):
self._setup_queues()
self._taskqueue = Queue.Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
self._force_execv = force_execv
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning("Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal."))
soft_timeout = None
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
self._processes = processes
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
self._pool = []
self._poolctrl = {}
for i in range(processes):
self._create_worker_process()
self._worker_handler = self.Supervisor(self)
self._worker_handler.start()
self._putlock = LaxBoundedSemaphore(self._processes)
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool)
self._task_handler.start()
# Thread killing timedout jobs.
self._timeout_handler = None
self._timeout_handler_mutex = threading.Lock()
if self.timeout is not None or self.soft_timeout is not None:
self._start_timeout_handler()
# Thread processing results in the outqueue.
self._result_handler = self.ResultHandler(self._outqueue,
self._quick_get, self._cache,
self._poll_result,
self._join_exited_workers,
self._putlock)
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler),
exitpriority=15,
)
def _create_worker_process(self):
sentinel = Event()
w = self.Process(
force_execv=self._force_execv,
target=worker,
args=(self._inqueue, self._outqueue,
self._initializer, self._initargs,
self._maxtasksperchild,
sentinel),
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
self._poolctrl[w.pid] = sentinel
return w
def _join_exited_workers(self, shutdown=False):
"""Cleanup after any worker processes which have exited due to
reaching their specified lifetime. Returns True if any workers were
cleaned up.
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# _lost_worker_timeout seconds before we mark the job with
# WorkerLostError.
for job in [job for job in self._cache.values()
if not job.ready() and job._worker_lost]:
now = now or time.time()
if now - job._worker_lost > job._lost_worker_timeout:
exc_info = None
try:
raise WorkerLostError("Worker exited prematurely.")
except WorkerLostError:
exc_info = ExceptionInfo(sys.exc_info())
job._set(None, (False, exc_info))
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned = []
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d' % i)
worker.join()
                debug('Supervisor: worker %d joined' % i)
cleaned.append(worker.pid)
del self._pool[i]
del self._poolctrl[worker.pid]
if cleaned:
for job in self._cache.values():
for worker_pid in job.worker_pids():
if worker_pid in cleaned and not job.ready():
job._worker_lost = time.time()
continue
if self._putlock is not None:
for worker in cleaned:
self._putlock.release()
return True
return False
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock._initial_value -= 1
self._putlock.acquire()
worker.terminate()
if i == n - 1:
return
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in xrange(n):
#assert len(self._pool) == self._processes
self._processes += 1
if self._putlock:
cond = self._putlock._Semaphore__cond
cond.acquire()
try:
self._putlock._initial_value += 1
self._putlock._Semaphore__value += 1
cond.notify()
finally:
cond.release()
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
raise StopIteration()
def _worker_active(self, worker):
for job in self._cache.values():
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
self._create_worker_process()
debug('added worker')
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
self._join_exited_workers()
self._repopulate_pool()
def _setup_queues(self):
from multiprocessing.queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
self._timeout_handler_mutex.acquire()
try:
if self._timeout_handler is None:
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout)
self._timeout_handler.start()
finally:
self._timeout_handler_mutex.release()
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
'''
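        # Illustrative (assumption, not from the original source): blocks the
        # caller until the result is ready, e.g.
        #   pool.apply(pow, (2, 10))    # -> 1024, computed in a worker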
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Equivalent of `map()` builtin
'''
assert self._state == RUN
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1, lost_worker_timeout=10.0):
'''
Equivalent of `itertools.imap()` -- can be MUCH slower
than `Pool.map()`
'''
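        # Illustrative (assumption, not from the original source):
        #   for n in pool.imap(abs, [-1, 2, -3]):
        #       ...                     # yields 1, 2, 3 in input order
        # A chunksize > 1 batches tasks through mapstar, trading latency for
        # less queue traffic.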
assert self._state == RUN
if chunksize == 1:
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1,
lost_worker_timeout=10.0):
'''
Like `imap()` method but ordering of results is arbitrary
'''
assert self._state == RUN
if chunksize == 1:
result = IMapUnorderedIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={},
callback=None, accept_callback=None, timeout_callback=None,
waitforslot=False, error_callback=None,
soft_timeout=None, timeout=None):
'''
Asynchronous equivalent of `apply()` builtin.
        Callback is called when the function's return value is ready.
The accept callback is called when the job is accepted to be executed.
        Simplified, the flow is like this:
>>> if accept_callback:
... accept_callback()
>>> retval = func(*args, **kwds)
>>> if callback:
... callback(retval)
'''
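        # Illustrative usage (assumption, not from the original source;
        # `on_done` and `on_error` are hypothetical callables):
        #   res = pool.apply_async(pow, (2, 10),
        #                          callback=on_done,
        #                          error_callback=on_error,
        #                          soft_timeout=5, timeout=10)
        #   res.get()       # -> 1024, or raises if the job failed/timed out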
assert self._state == RUN
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning("Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal."))
soft_timeout = None
if waitforslot and self._putlock is not None and self._state == RUN:
self._putlock.acquire()
if self._state == RUN:
result = ApplyResult(self._cache, callback,
accept_callback, timeout_callback,
error_callback, soft_timeout, timeout)
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
self._taskqueue.put(([(result._job, None,
func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None):
'''
Asynchronous equivalent of `map()` builtin
'''
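        # Illustrative (assumption, not from the original source):
        #   pool.map_async(abs, [-1, -2, 3]).get()   # -> [1, 2, 3]
        # When chunksize is None a heuristic of roughly
        # len(iterable) / (4 * number of workers) is used, as computed below.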
assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
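        # For example, _get_tasks(f, range(5), 2) yields
        # (f, (0, 1)), (f, (2, 3)) and (f, (4,)).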
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between '
'processes or pickled')
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._worker_handler.close()
self._worker_handler.join()
self._taskqueue.put(None)
if self._putlock:
self._putlock.clear()
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
self._worker_handler.join()
debug('joining task handler')
self._task_handler.join()
debug('joining result handler')
self._result_handler.join()
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)' % (i, len(self._pool), p, ))
p.join()
def restart(self):
for e in self._poolctrl.itervalues():
e.set()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
result_handler.terminate()
outqueue.put(None) # sentinel
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p.exitcode is None:
p.terminate()
debug('joining task handler')
task_handler.join(1e100)
debug('joining result handler')
result_handler.join(1e100)
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.join(1e100)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d' % p.pid)
p.join()
debug('pool workers joined')
DynamicPool = Pool
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None, lost_worker_timeout=10.0):
self._mutex = threading.Lock()
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._ready = False
self._callback = callback
self._accept_callback = accept_callback
self._errback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._lost_worker_timeout = lost_worker_timeout
self._accepted = False
self._worker_pid = None
self._time_accepted = None
cache[self._job] = self
def ready(self):
return self._ready
def accepted(self):
return self._accepted
def successful(self):
assert self._ready
return self._success
def worker_pids(self):
return filter(None, [self._worker_pid])
def wait(self, timeout=None):
self._cond.acquire()
try:
if not self._ready:
self._cond.wait(timeout)
finally:
self._cond.release()
def get(self, timeout=None):
self.wait(timeout)
if not self._ready:
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._mutex.acquire()
try:
self._success, self._value = obj
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
if self._accepted:
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
safe_apply_callback(
self._callback, self._value)
if self._errback and not self._success:
safe_apply_callback(
self._errback, self._value)
finally:
self._mutex.release()
def _ack(self, i, time_accepted, pid):
self._mutex.acquire()
try:
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self._ready:
self._cache.pop(self._job, None)
if self._accept_callback:
safe_apply_callback(
self._accept_callback, pid, time_accepted)
finally:
self._mutex.release()
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._ready = True
else:
self._number_left = length // chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
else:
self._success = False
self._value = result
if self._accepted:
self._cache.pop(self._job, None)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
def _ack(self, i, time_accepted, pid):
start = i * self._chunksize
stop = (i + 1) * self._chunksize
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self._ready:
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return filter(None, self._worker_pid)
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
_worker_lost = None
def __init__(self, cache, lost_worker_timeout=10.0):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._ready = False
self._unsorted = {}
self._worker_pids = []
self._lost_worker_timeout = lost_worker_timeout
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise Exception(value)
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
self._ready = True
del self._cache[self._job]
finally:
self._cond.release()
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._ready = True
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
def _ack(self, i, time_accepted, pid):
self._worker_pids.append(pid)
def ready(self):
return self._ready
def worker_pids(self):
return self._worker_pids
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
self._ready = True
del self._cache[self._job]
finally:
self._cond.release()
#
#
#
class ThreadPool(Pool):
from multiprocessing.dummy import Process as DummyProcess
Process = DummyProcess
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Queue.Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notify_all()
finally:
inqueue.not_empty.release()
| 31.819182 | 79 | 0.554282 |
7811bebd99ec25f433c0a6ed27e8a627c3b61246 | 1,982 | py | Python | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | 1 | 2021-09-23T09:42:32.000Z | 2021-09-23T09:42:32.000Z | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import json
import sys
from os import path
import rospy
from geometry_msgs.msg import PoseStamped
from rospkg import RosPack
def main(args, goal_pub):
global config
msg = PoseStamped()
if args['point']:
x, y, z, w = args['point']
else:
try:
x, y, z, w = config[args['loc']]
except KeyError:
raise KeyError(f'Location {args["loc"]} does not exist')
msg.header.frame_id = 'map'
msg.pose.position.x = x
msg.pose.position.y = y
msg.pose.orientation.z = z
msg.pose.orientation.w = w
while rospy.get_param('/status_monitor/status_code') != 0:
goal_pub.publish(msg)
if args['wait_until_end']:
while rospy.get_param('/status_monitor/status_code') != 3:
continue
base = RosPack().get_path('rcj_pcms_base')
config = json.load(open(path.join(base, 'config/points.json')))
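# Assumed layout of points.json (illustrative; the actual file is not shown):
#   {"start": [0.0, 0.0, 0.0, 1.0], "door": [1.2, -0.5, 0.7, 0.7]}
# i.e. a mapping from location name to [x, y, z, w], as unpacked in main().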
if __name__ == '__main__':
rospy.init_node('go_to_point', anonymous=True)
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--point', nargs=4,
type=float,
                        help='point for the robot to go to: (x, y, z, w)')
parser.add_argument('-l', '--loc', type=str,
                        help='Publish a point based on the config file "points.json"'
)
parser.add_argument('--wait-until-end', action='store_true',
help="Wait until the slam has end")
args = vars(parser.parse_args())
goal_pub = rospy.Publisher(
'/move_base_simple/goal',
PoseStamped,
queue_size=1
)
try:
if not (args['point'] or args['loc']):
raise Exception('Must specify -p or -l')
elif args['point'] and args['loc']:
raise Exception('Can only specify one of them')
main(args, goal_pub)
sys.exit(0)
except Exception as e:
print(f'Program ended due to: {e}')
sys.exit(1)
| 29.147059 | 76 | 0.584763 |
433ee25d9a374330a03552535f57c0612f6abbd5 | 1,134 | py | Python | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/operator_error.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/operator_error.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/operator_error.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.errors',
marshal='google.ads.googleads.v8',
manifest={
'OperatorErrorEnum',
},
)
class OperatorErrorEnum(proto.Message):
r"""Container for enum describing possible operator errors.
"""
class OperatorError(proto.Enum):
r"""Enum describing possible operator errors."""
UNSPECIFIED = 0
UNKNOWN = 1
OPERATOR_NOT_SUPPORTED = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.076923 | 74 | 0.709877 |
fd0e2fe78c47cd90e97c5735bbb277a23664626c | 8,078 | py | Python | django/apps/config.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | django/apps/config.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | django/apps/config.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | from importlib import import_module
import os
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3.3 _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
def check_models_ready(self):
"""
Raises an exception if models haven't been imported yet.
"""
if self.models is None:
raise AppRegistryNotReady(
"Models for app '%s' haven't been imported yet." % self.label)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
self.check_models_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.check_models_ready()
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| 39.404878 | 81 | 0.617108 |
dce732a2e499ad42ab8e275ba3b4a55eaee281b5 | 2,868 | py | Python | src/sima/hla/controlpanel.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/hla/controlpanel.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/hla/controlpanel.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | # This an autogenerated file
#
# Generated with ControlPanel
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.controlpanel import ControlPanelBlueprint
from typing import Dict
from sima.custom.customcomponent import CustomComponent
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class ControlPanel(MOAO):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
children : List[CustomComponent]
title : str
(default "")
"""
def __init__(self , name="", description="", _id="", title="", **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.children = list()
self.title = title
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return ControlPanelBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def children(self) -> List[CustomComponent]:
""""""
return self.__children
@children.setter
def children(self, value: List[CustomComponent]):
"""Set children"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__children = value
@property
def title(self) -> str:
""""""
return self.__title
@title.setter
def title(self, value: str):
"""Set title"""
self.__title = str(value)
| 25.837838 | 77 | 0.599721 |
6b2e9b0b58d70e87a28968a46061fc81cf050717 | 378 | py | Python | base_app/other_app/models.py | cs-fullstack-fall-2018/django-form-post1-bachmanryan | 377b0c46b7f6aac02b4fe164b612082a0643f0bd | [
"Apache-2.0"
] | null | null | null | base_app/other_app/models.py | cs-fullstack-fall-2018/django-form-post1-bachmanryan | 377b0c46b7f6aac02b4fe164b612082a0643f0bd | [
"Apache-2.0"
] | null | null | null | base_app/other_app/models.py | cs-fullstack-fall-2018/django-form-post1-bachmanryan | 377b0c46b7f6aac02b4fe164b612082a0643f0bd | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.utils import timezone
class Game(models.Model):
name = models.CharField(max_length=100)
genre = models.CharField(max_length=100)
release_date = models.DateTimeField(default=timezone.now)
def release(self):
self.release_date = timezone.now()
self.save()
def __str__(self):
return self.name
| 23.625 | 61 | 0.698413 |
de23fa276fd8c9edb6f7641671a3688d1647ee41 | 3,907 | py | Python | dpr/index_simple_corpus.py | IBM/kgi-slot-filling | 24f8005bc1d010746e046a8c2ec292a2222fff00 | [
"Apache-2.0"
] | 21 | 2021-05-27T23:14:19.000Z | 2022-02-10T06:36:55.000Z | dpr/index_simple_corpus.py | IBM/retrieve-write-slot-filling | 24f8005bc1d010746e046a8c2ec292a2222fff00 | [
"Apache-2.0"
] | 1 | 2021-11-26T04:00:19.000Z | 2021-11-26T04:00:19.000Z | dpr/index_simple_corpus.py | IBM/kgi-slot-filling | 24f8005bc1d010746e046a8c2ec292a2222fff00 | [
"Apache-2.0"
] | 3 | 2021-06-04T13:14:22.000Z | 2022-03-09T14:45:50.000Z | from util.line_corpus import read_lines, write_open, jsonl_files
import ujson as json
import logging
import os
from util.args_help import fill_from_args
import torch
from typing import List
import numpy as np
from dpr.simple_mmap_dataset import gzip_str
from dpr.faiss_index import build_index, IndexOptions
import base64
from util.reporting import Reporting
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
)
logger = logging.getLogger(__name__)
class Options(IndexOptions):
def __init__(self):
super().__init__()
self.rag_model_name = 'facebook/rag-token-nq'
self.dpr_ctx_encoder_model_name = 'facebook/dpr-ctx_encoder-multiset-base'
self.dpr_ctx_encoder_path = ''
self.embed = '1of1'
self.corpus = ''
self.output_dir = '' # the output_dir will have the passages dataset and the hnsw_index.faiss
self.batch_size = 16
self.__required_args__ = ['output_dir']
opts = Options()
fill_from_args(opts)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
os.makedirs(opts.output_dir, exist_ok=True)
def embed(doc_batch: List, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> np.ndarray:
documents = {"title": [doci['title'] for doci in doc_batch], 'text': [doci['text'] for doci in doc_batch]}
"""Compute the DPR embeddings of document passages"""
input_ids = ctx_tokenizer(
documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
)["input_ids"]
embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
return embeddings.detach().cpu().to(dtype=torch.float16).numpy()
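# Note: `embed` returns a float16 array of shape (len(doc_batch), hidden_size);
# for the multiset-base DPR context encoder the hidden size is 768 (an
# assumption about the default checkpoint, not stated in this script).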
def write(cur_offset, offsets, passage_file, doc_batch, embeddings):
assert len(doc_batch) == embeddings.shape[0]
assert len(embeddings.shape) == 2
for di, doc in enumerate(doc_batch):
doc['vector'] = base64.b64encode(embeddings[di].astype(np.float16)).decode('ascii')
jstr_gz = gzip_str(json.dumps(doc))
offsets.append(cur_offset)
passage_file.write(jstr_gz)
cur_offset += len(jstr_gz)
return cur_offset
embed_num, embed_count = [int(n.strip()) for n in opts.embed.split('of')]
assert 1 <= embed_num <= embed_count
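# The `--embed NofM` convention (e.g. `2of4`) shards the corpus: this process
# embeds every M-th input file starting at file N-1 (see the stride applied to
# jsonl_files() below).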
# And compute the embeddings
ctx_encoder = DPRContextEncoder.from_pretrained(opts.dpr_ctx_encoder_path if opts.dpr_ctx_encoder_path
else opts.dpr_ctx_encoder_model_name).to(device=device)
ctx_encoder.eval()
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(opts.dpr_ctx_encoder_model_name)
offsets = []
cur_offset = 0
passages = write_open(os.path.join(opts.output_dir, f'passages_{embed_num}_of_{embed_count}.json.gz.records'), binary=True)
report = Reporting()
data_files = jsonl_files(opts.corpus)[embed_num-1::embed_count]
doc_batch = []
for line in read_lines(data_files):
if report.is_time():
print(f'On instance {report.check_count}, {report.check_count/report.elapsed_seconds()} instances per second')
jobj = json.loads(line)
doc_batch.append(jobj)
if len(doc_batch) == opts.batch_size:
embeddings = embed(doc_batch, ctx_encoder, ctx_tokenizer)
cur_offset = write(cur_offset, offsets, passages, doc_batch, embeddings)
doc_batch = []
if len(doc_batch) > 0:
embeddings = embed(doc_batch, ctx_encoder, ctx_tokenizer)
cur_offset = write(cur_offset, offsets, passages, doc_batch, embeddings)
offsets.append(cur_offset) # just the length of the file
passages.close()
with write_open(os.path.join(opts.output_dir, f'offsets_{embed_num}_of_{embed_count}.npy'), binary=True) as f:
np.save(f, np.array(offsets, dtype=np.int64), allow_pickle=False)
if embed_count == 1:
build_index(opts.output_dir, os.path.join(opts.output_dir, 'index.faiss'), opts)
| 38.683168 | 123 | 0.730484 |