hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
474932dbda9864fddc4432b01c34354f26e33b07 | 9,710 | py | Python | aesara/compile/nanguardmode.py | zaxtax/aesara | ef256e526e313f9e7916f8c9b2c4802c44d0b44a | ["BSD-3-Clause"] | null | null | null | aesara/compile/nanguardmode.py | zaxtax/aesara | ef256e526e313f9e7916f8c9b2c4802c44d0b44a | ["BSD-3-Clause"] | null | null | null | aesara/compile/nanguardmode.py | zaxtax/aesara | ef256e526e313f9e7916f8c9b2c4802c44d0b44a | ["BSD-3-Clause"] | null | null | null |
import logging
from collections.abc import ValuesView
from io import StringIO
import numpy as np
import aesara
from aesara.compile.mode import Mode, get_mode
from aesara.configdefaults import config
from aesara.tensor.math import abs as at_abs
from aesara.tensor.math import max as at_max
from aesara.tensor.math import min as at_min
from aesara.tensor.type import discrete_dtypes
try:
from pygpu.gpuarray import GpuArray
from aesara.gpuarray.type import GpuArrayType, _name_for_ctx
pygpu_available = True
except ImportError:
pygpu_available = False
logger = logging.getLogger("aesara.compile.nanguardmode")
def _is_numeric_value(arr, var):
"""
    Checks a variable against non-numeric types such as types, slices,
    empty arrays, and None, which need not be checked for NaN and Inf values.
    Parameters
    ----------
    arr : the data that corresponds to any Aesara Variable
    var : The corresponding Aesara variable
    Returns
    -------
    is_numeric : bool
        `True` if the value is numeric and should be checked for NaN and Inf values.
"""
from aesara.link.c.type import _cdata_type
if isinstance(arr, _cdata_type):
return False
elif isinstance(arr, (np.random.mtrand.RandomState, np.random.Generator)):
return False
elif var and getattr(var.tag, "is_rng", False):
return False
elif isinstance(arr, slice):
return False
elif arr is None:
return False
elif arr.size == 0:
return False
return True
def flatten(l):
"""
Turns a nested graph of lists/tuples/other objects into a list of objects.
Parameters
----------
l : list/tuple/other objects
Might be nested.
Returns
-------
object
A flattened list of objects.
"""
if isinstance(l, (list, tuple, ValuesView)):
rval = []
for elem in l:
if isinstance(elem, (list, tuple)):
rval.extend(flatten(elem))
else:
rval.append(elem)
else:
return [l]
return rval
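# Illustrative sketch (not part of the original module): what `flatten` does to
# a small nested structure. The input below is only an example.
def _flatten_example():
    nested = [1, (2, 3), [4, [5, 6]], {"a": 7}.values()]
    # Nested lists/tuples are expanded recursively; any other element -- here
    # the dict ValuesView -- is appended as a single object.
    return flatten(nested)  # [1, 2, 3, 4, 5, 6, dict_values([7])]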
def contains_nan(arr, node=None, var=None):
"""
Test whether a numpy.ndarray contains any `np.nan` values.
Parameters
----------
arr : np.ndarray or output of any Aesara op
node : None or an Apply instance.
If arr is the output of an Aesara op, the node associated to it.
var : The Aesara symbolic variable.
Returns
-------
contains_nan : bool
`True` if the array contains any `np.nan` values, `False` otherwise.
Notes
-----
Tests for the presence of `np.nan`'s using `np.isnan(np.min(ndarray))`.
This approach is faster and more memory efficient than the obvious
alternative, calling `np.any(np.isnan(ndarray))`, which requires the
construction of a boolean array with the same shape as the input array.
"""
if not _is_numeric_value(arr, var):
return False
elif getattr(arr, "dtype", "") in discrete_dtypes:
return False
elif pygpu_available and isinstance(arr, GpuArray):
return np.isnan(f_gpua_min(arr.reshape(arr.size)))
return np.isnan(np.min(arr))
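# Illustrative sketch (not part of the original module): the np.isnan(np.min(...))
# trick used above relies on np.min propagating NaN, so a single scalar check is
# enough and no full boolean mask has to be allocated.
def _contains_nan_example():
    clean = np.array([0.0, 1.0, 2.0])
    dirty = np.array([0.0, np.nan, 2.0])
    return np.isnan(np.min(clean)), np.isnan(np.min(dirty))  # (False, True)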
def contains_inf(arr, node=None, var=None):
"""
Test whether a numpy.ndarray contains any `np.inf` values.
Parameters
----------
arr : np.ndarray or output of any Aesara op
node : None or an Apply instance.
If the output of an Aesara op, the node associated to it.
var : The Aesara symbolic variable.
Returns
-------
contains_inf : bool
`True` if the array contains any `np.inf` values, `False` otherwise.
Notes
-----
Tests for the presence of `np.inf`'s by determining whether the
values returned by `np.nanmin(arr)` and `np.nanmax(arr)` are finite.
This approach is more memory efficient than the obvious alternative,
calling `np.any(np.isinf(ndarray))`, which requires the construction of a
boolean array with the same shape as the input array.
"""
if not _is_numeric_value(arr, var):
return False
elif getattr(arr, "dtype", "") in discrete_dtypes:
return False
elif pygpu_available and isinstance(arr, GpuArray):
return np.isinf(f_gpua_min(arr.reshape(arr.size))) or np.isinf(
f_gpua_max(arr.reshape(arr.size))
)
return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))
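# Illustrative sketch (not part of the original module): np.nanmax/np.nanmin
# ignore NaNs but still return +/-inf when an infinity is present, which is why
# the check above uses them instead of plain max/min (whose result a NaN would mask).
def _contains_inf_example():
    arr = np.array([0.0, np.nan, np.inf])
    return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))  # True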
def f_compute(op):
def result(inp):
dtype = inp.dtype
ctx_name = _name_for_ctx(inp.context)
key = (dtype, ctx_name)
f = result.cache.get(key, None)
if f is None:
guard_in = GpuArrayType(str(dtype), (False,), context_name=ctx_name)()
mode = get_mode("FAST_RUN").including("gpuarray")
f = aesara.function([guard_in], op(guard_in), mode=mode, profile=False)
result.cache[key] = f
return f(inp)
result.cache = dict()
return result
f_gpua_min = f_compute(at_min)
f_gpua_max = f_compute(at_max)
f_gpua_absmax = f_compute(lambda x: at_max(at_abs(x)))
class NanGuardMode(Mode):
"""
    An Aesara compilation Mode that makes the compiled function automatically
    detect NaNs and Infs and raise an error if they occur.
Parameters
----------
nan_is_error : bool
If True, raise an error anytime a NaN is encountered.
inf_is_error : bool
If True, raise an error anytime an Inf is encountered. Note that some
pylearn2 modules currently use np.inf as a default value (e.g.
mlp.max_pool) and these will cause an error if inf_is_error is True.
big_is_error : bool
If True, raise an error when a value greater than 1e10 is encountered.
Notes
-----
We ignore the linker parameter
"""
    # We currently lose the first 3 params frequently when calling
    # mode.including() and variants.
def __init__(
self,
nan_is_error=None,
inf_is_error=None,
big_is_error=None,
optimizer="default",
linker=None,
db=None,
):
self.provided_optimizer = optimizer
if nan_is_error is None:
nan_is_error = config.NanGuardMode__nan_is_error
if inf_is_error is None:
inf_is_error = config.NanGuardMode__inf_is_error
if big_is_error is None:
big_is_error = config.NanGuardMode__big_is_error
assert nan_is_error or inf_is_error or big_is_error
def do_check_on(value, nd, var=None):
"""
Checks `value` for NaNs / Infs. If detected, raises an exception
and / or prints information about `nd`, `f`, and `is_input` to
help the user determine the cause of the invalid values.
Parameters
----------
value : numpy.ndarray
The value to be checked.
nd : aesara.graph.basic.Apply
The Apply node being executed.
var : aesara.graph.basic.Variable
Not used if nd is there. Otherwise, used to print the stack
trace for inputs of the graph.
"""
error = False
sio = StringIO()
if nan_is_error:
if contains_nan(value, nd, var):
print("NaN detected", file=sio)
error = True
if inf_is_error:
if contains_inf(value, nd, var):
print("Inf detected", file=sio)
error = True
if big_is_error:
err = False
if not _is_numeric_value(value, var):
err = False
elif pygpu_available and isinstance(value, GpuArray):
err = f_gpua_absmax(value.reshape(value.size)) > 1e10
else:
err = np.abs(value).max() > 1e10
if err:
print("Big value detected", file=sio)
error = True
if error:
if nd:
print(
"NanGuardMode found an error in the "
"output of a node in this variable:",
file=sio,
)
print(aesara.printing.debugprint(nd, file="str"), file=sio)
else:
print(
"NanGuardMode found an error in an input of the " "graph.",
file=sio,
)
# Add the stack trace
if nd:
var = nd.outputs[0]
print(aesara.graph.utils.get_variable_trace_string(var), file=sio)
msg = sio.getvalue()
if config.NanGuardMode__action == "raise":
raise AssertionError(msg)
elif config.NanGuardMode__action == "pdb":
print(msg)
import pdb
pdb.set_trace()
elif config.NanGuardMode__action == "warn":
logger.error(msg)
def nan_check(node, thunk, storage_map, compute_map):
for var in node.outputs:
if compute_map[var][0] and getattr(
var.tag, "nan_guard_mode_check", True
):
do_check_on(storage_map[var][0], node)
def nan_check_input(var, value):
if getattr(var.tag, "nan_guard_mode_check", True):
do_check_on(value, None, var=var)
wrap_linker = aesara.link.vm.VMLinker(
callback=nan_check, callback_input=nan_check_input
)
super().__init__(linker=wrap_linker, optimizer=self.provided_optimizer, db=db)
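# Illustrative usage sketch (not part of the original module), assuming the
# conventional Aesara workflow: pass an instance as the `mode` argument of
# `aesara.function` so every node's outputs are checked at runtime. The graph
# built below is a made-up example.
def _nan_guard_mode_example():
    import aesara.tensor as at
    x = at.matrix("x")
    guarded = aesara.function(
        [x],
        at_abs(x).sum(),
        mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
    )
    # Finite, small inputs pass through; NaN/Inf/huge values would raise.
    return guarded(np.ones((2, 2)))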
| 31.836066 | 86 | 0.59104 |
80973b7336d93df00c4e6a4388dde47ccb10bd81 | 11,067 | py | Python | airflow/utils/cli.py | jkugiya/airflow | 1dfbb8d2031cb8a3e95e4bf91aa478857c5c3a85 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 3 | 2021-01-29T20:33:56.000Z | 2021-08-06T17:35:16.000Z | airflow/utils/cli.py | jkugiya/airflow | 1dfbb8d2031cb8a3e95e4bf91aa478857c5c3a85 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 210 | 2021-07-17T00:25:52.000Z | 2021-12-29T00:44:48.000Z | airflow/utils/cli.py | jkugiya/airflow | 1dfbb8d2031cb8a3e95e4bf91aa478857c5c3a85 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 2 | 2021-05-05T07:36:36.000Z | 2021-11-10T17:32:39.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Utilities module for cli"""
import functools
import json
import logging
import os
import re
import socket
import sys
import threading
import traceback
import warnings
from argparse import Namespace
from datetime import datetime
from typing import TYPE_CHECKING, Callable, Optional, TypeVar, cast
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.utils import cli_action_loggers
from airflow.utils.platform import getuser, is_terminal_support_colors
from airflow.utils.session import provide_session
T = TypeVar("T", bound=Callable) # pylint: disable=invalid-name
if TYPE_CHECKING:
from airflow.models import DAG
def _check_cli_args(args):
if not args:
raise ValueError("Args should be set")
if not isinstance(args[0], Namespace):
raise ValueError(
"1st positional argument should be argparse.Namespace instance," f"but is {type(args[0])}"
)
def action_logging(f: T) -> T:
"""
    Decorator that wraps a CLI function so that action logging is submitted
    around its execution. It will call the action logger callbacks twice,
    once for pre-execution and once for post-execution.
Action logger will be called with below keyword parameters:
sub_command : name of sub-command
start_datetime : start datetime instance by utc
end_datetime : end datetime instance by utc
full_command : full command line arguments
user : current user
log : airflow.models.log.Log ORM instance
dag_id : dag id (optional)
task_id : task_id (optional)
execution_date : execution date (optional)
error : exception instance if there's an exception
:param f: function instance
:return: wrapped function
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
        A wrapper for cli functions. It assumes a Namespace instance is
        passed as the 1st positional argument.
        :param args: Positional arguments. A Namespace instance is expected
            at the 1st position
        :param kwargs: A passthrough keyword argument
"""
_check_cli_args(args)
metrics = _build_metrics(f.__name__, args[0])
cli_action_loggers.on_pre_execution(**metrics)
try:
return f(*args, **kwargs)
except Exception as e:
metrics['error'] = e
raise
finally:
metrics['end_datetime'] = datetime.utcnow()
cli_action_loggers.on_post_execution(**metrics)
return cast(T, wrapper)
def _build_metrics(func_name, namespace):
"""
Builds metrics dict from function args
    It assumes that function arguments are from airflow.bin.cli module's function
and has Namespace instance where it optionally contains "dag_id", "task_id",
and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
from airflow.models import Log
sub_commands_to_check = {'users', 'connections'}
sensitive_fields = {'-p', '--password', '--conn-password'}
full_command = list(sys.argv)
if full_command[1] in sub_commands_to_check: # pylint: disable=too-many-nested-blocks
for idx, command in enumerate(full_command):
if command in sensitive_fields:
# For cases when password is passed as "--password xyz" (with space between key and value)
full_command[idx + 1] = "*" * 8
else:
# For cases when password is passed as "--password=xyz" (with '=' between key and value)
for sensitive_field in sensitive_fields:
if command.startswith(f'{sensitive_field}='):
full_command[idx] = f'{sensitive_field}={"*" * 8}'
metrics = {
'sub_command': func_name,
'start_datetime': datetime.utcnow(),
'full_command': f'{full_command}',
'user': getuser(),
}
if not isinstance(namespace, Namespace):
raise ValueError(
"namespace argument should be argparse.Namespace instance," f"but is {type(namespace)}"
)
tmp_dic = vars(namespace)
metrics['dag_id'] = tmp_dic.get('dag_id')
metrics['task_id'] = tmp_dic.get('task_id')
metrics['execution_date'] = tmp_dic.get('execution_date')
metrics['host_name'] = socket.gethostname()
extra = json.dumps({k: metrics[k] for k in ('host_name', 'full_command')})
log = Log(
event=f'cli_{func_name}',
task_instance=None,
owner=metrics['user'],
extra=extra,
task_id=metrics.get('task_id'),
dag_id=metrics.get('dag_id'),
execution_date=metrics.get('execution_date'),
)
metrics['log'] = log
return metrics
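# Illustrative sketch (not part of the original module): the password-masking
# behaviour implemented above, re-applied to a hypothetical argv. The original
# only applies this to the `users`/`connections` sub-commands; both the
# space-separated and the '=' forms of a sensitive flag are redacted.
def _mask_sensitive_args_example():
    argv = ["airflow", "users", "create", "--password", "hunter2", "--conn-password=hunter2"]
    sensitive = {"-p", "--password", "--conn-password"}
    for idx, word in enumerate(argv):
        if word in sensitive:
            # "--password xyz" form: mask the following value
            argv[idx + 1] = "*" * 8
        else:
            # "--password=xyz" form: mask the value part of this token
            for flag in sensitive:
                if word.startswith(f"{flag}="):
                    argv[idx] = f'{flag}={"*" * 8}'
    return argv  # ['airflow', 'users', 'create', '--password', '********', '--conn-password=********']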
def process_subdir(subdir: Optional[str]):
"""Expands path to absolute by replacing 'DAGS_FOLDER', '~', '.', etc."""
if subdir:
if not settings.DAGS_FOLDER:
raise ValueError("DAGS_FOLDER variable in settings should be filled.")
subdir = subdir.replace('DAGS_FOLDER', settings.DAGS_FOLDER)
subdir = os.path.abspath(os.path.expanduser(subdir))
return subdir
def get_dag_by_file_location(dag_id: str):
"""Returns DAG of a given dag_id by looking up file location"""
from airflow.models import DagBag, DagModel
# Benefit is that logging from other dags in dagbag will not appear
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise AirflowException(
'dag_id could not be found: {}. Either the dag did not exist or it failed to '
'parse.'.format(dag_id)
)
dagbag = DagBag(dag_folder=dag_model.fileloc)
return dagbag.dags[dag_id]
def get_dag(subdir: Optional[str], dag_id: str) -> "DAG":
"""Returns DAG of a given dag_id"""
from airflow.models import DagBag
dagbag = DagBag(process_subdir(subdir))
if dag_id not in dagbag.dags:
raise AirflowException(
'dag_id could not be found: {}. Either the dag did not exist or it failed to '
'parse.'.format(dag_id)
)
return dagbag.dags[dag_id]
def get_dags(subdir: Optional[str], dag_id: str, use_regex: bool = False):
"""Returns DAG(s) matching a given regex or dag_id"""
from airflow.models import DagBag
if not use_regex:
return [get_dag(subdir, dag_id)]
dagbag = DagBag(process_subdir(subdir))
matched_dags = [dag for dag in dagbag.dags.values() if re.search(dag_id, dag.dag_id)]
if not matched_dags:
raise AirflowException(
'dag_id could not be found with regex: {}. Either the dag did not exist '
'or it failed to parse.'.format(dag_id)
)
return matched_dags
@provide_session
def get_dag_by_pickle(pickle_id, session=None):
"""Fetch DAG from the database using pickling"""
from airflow.models import DagPickle
dag_pickle = session.query(DagPickle).filter(DagPickle.id == pickle_id).first()
if not dag_pickle:
raise AirflowException("Who hid the pickle!? [missing pickle]")
pickle_dag = dag_pickle.pickle
return pickle_dag
def setup_locations(process, pid=None, stdout=None, stderr=None, log=None):
"""Creates logging paths"""
if not stderr:
stderr = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.err')
if not stdout:
stdout = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.out')
if not log:
log = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.log')
if not pid:
pid = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.pid')
else:
pid = os.path.abspath(pid)
return pid, stdout, stderr, log
def setup_logging(filename):
"""Creates log file handler for daemon process"""
root = logging.getLogger()
handler = logging.FileHandler(filename)
formatter = logging.Formatter(settings.SIMPLE_LOG_FORMAT)
handler.setFormatter(formatter)
root.addHandler(handler)
root.setLevel(settings.LOGGING_LEVEL)
return handler.stream
def sigint_handler(sig, frame): # pylint: disable=unused-argument
"""
Returns without error on SIGINT or SIGTERM signals in interactive command mode
e.g. CTRL+C or kill <PID>
"""
sys.exit(0)
def sigquit_handler(sig, frame): # pylint: disable=unused-argument
"""
Helps debug deadlocks by printing stacktraces when this gets a SIGQUIT
e.g. kill -s QUIT <PID> or CTRL+\
"""
print(f"Dumping stack traces for all threads in PID {os.getpid()}")
id_to_name = {th.ident: th.name for th in threading.enumerate()}
code = []
for thread_id, stack in sys._current_frames().items(): # pylint: disable=protected-access
code.append(f"\n# Thread: {id_to_name.get(thread_id, '')}({thread_id})")
for filename, line_number, name, line in traceback.extract_stack(stack):
code.append(f'File: "{filename}", line {line_number}, in {name}')
if line:
code.append(f" {line.strip()}")
print("\n".join(code))
class ColorMode:
"""Coloring modes. If `auto` is then automatically detected."""
ON = "on"
OFF = "off"
AUTO = "auto"
def should_use_colors(args) -> bool:
"""Processes arguments and decides whether to enable color in output"""
if args.color == ColorMode.ON:
return True
if args.color == ColorMode.OFF:
return False
return is_terminal_support_colors()
def suppress_logs_and_warning(f: T) -> T:
"""
Decorator to suppress logging and warning messages
in cli functions.
"""
@functools.wraps(f)
def _wrapper(*args, **kwargs):
_check_cli_args(args)
if args[0].verbose:
f(*args, **kwargs)
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
logging.disable(logging.CRITICAL)
try:
f(*args, **kwargs)
finally:
# logging output again depends on the effective
# levels of individual loggers
logging.disable(logging.NOTSET)
return cast(T, _wrapper)
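# Illustrative sketch (not part of the original module): the decorator above
# expects the wrapped callable to receive a Namespace carrying a `verbose`
# flag as its first positional argument. The toy command below is an
# assumption for the example only.
@suppress_logs_and_warning
def _demo_quiet_command(args: Namespace):
    logging.getLogger(__name__).info("only shown when verbose is set")
# _demo_quiet_command(Namespace(verbose=False)) runs with logging disabled,
# while Namespace(verbose=True) leaves logging untouched.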
| 34.584375 | 106 | 0.662691 |
207ad4eea49922952bf9a39630cdcd5919138f72 | 5,478 | py | Python | vkwave/api/methods/leads.py | YorkDW/vkwave | 86b0278f15f398217a8211007c44651b6145831b | ["MIT"] | null | null | null | vkwave/api/methods/leads.py | YorkDW/vkwave | 86b0278f15f398217a8211007c44651b6145831b | ["MIT"] | null | null | null | vkwave/api/methods/leads.py | YorkDW/vkwave | 86b0278f15f398217a8211007c44651b6145831b | ["MIT"] | null | null | null |
from vkwave.types.responses import *
from ._category import Category
from ._utils import get_params
class Leads(Category):
async def check_user(
self,
lead_id: int,
return_raw_response: bool = False,
test_result: typing.Optional[int] = None,
test_mode: typing.Optional[bool] = None,
auto_start: typing.Optional[bool] = None,
age: typing.Optional[int] = None,
country: typing.Optional[str] = None,
) -> typing.Union[dict, LeadsCheckUserResponse]:
"""
:param lead_id: - Lead ID.
:param test_result: - Value to be return in 'result' field when test mode is used.
:param test_mode:
:param auto_start:
:param age: - User age.
:param country: - User country code.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("checkUser", params)
if return_raw_response:
return raw_result
result = LeadsCheckUserResponse(**raw_result)
return result
async def complete(
self,
vk_sid: str,
secret: str,
return_raw_response: bool = False,
comment: typing.Optional[str] = None,
) -> typing.Union[dict, LeadsCompleteResponse]:
"""
:param vk_sid: - Session obtained as GET parameter when session started.
:param secret: - Secret key from the lead testing interface.
:param comment: - Comment text.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("complete", params)
if return_raw_response:
return raw_result
result = LeadsCompleteResponse(**raw_result)
return result
async def get_stats(
self,
lead_id: int,
return_raw_response: bool = False,
secret: typing.Optional[str] = None,
date_start: typing.Optional[BaseBoolInt] = None,
date_end: typing.Optional[BaseBoolInt] = None,
) -> typing.Union[dict, LeadsGetStatsResponse]:
"""
:param lead_id: - Lead ID.
:param secret: - Secret key obtained from the lead testing interface.
:param date_start: - Day to start stats from (YYYY_MM_DD, e.g.2011-09-17).
:param date_end: - Day to finish stats (YYYY_MM_DD, e.g.2011-09-17).
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getStats", params)
if return_raw_response:
return raw_result
result = LeadsGetStatsResponse(**raw_result)
return result
async def get_users(
self,
offer_id: int,
secret: str,
return_raw_response: bool = False,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
status: typing.Optional[BaseBoolInt] = None,
reverse: typing.Optional[BaseBoolInt] = None,
) -> typing.Union[dict, LeadsGetUsersResponse]:
"""
:param offer_id: - Offer ID.
:param secret: - Secret key obtained in the lead testing interface.
:param offset: - Offset needed to return a specific subset of results.
:param count: - Number of results to return.
:param status: - Action type. Possible values: *'0' — start,, *'1' — finish,, *'2' — blocking users,, *'3' — start in a test mode,, *'4' — finish in a test mode.
:param reverse: - Sort order. Possible values: *'1' — chronological,, *'0' — reverse chronological.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getUsers", params)
if return_raw_response:
return raw_result
result = LeadsGetUsersResponse(**raw_result)
return result
async def metric_hit(
self, data: str, return_raw_response: bool = False,
) -> typing.Union[dict, LeadsMetricHitResponse]:
"""
:param data: - Metric data obtained in the lead interface.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("metricHit", params)
if return_raw_response:
return raw_result
result = LeadsMetricHitResponse(**raw_result)
return result
async def start(
self,
lead_id: int,
secret: str,
return_raw_response: bool = False,
uid: typing.Optional[int] = None,
aid: typing.Optional[int] = None,
test_mode: typing.Optional[bool] = None,
force: typing.Optional[bool] = None,
) -> typing.Union[dict, LeadsStartResponse]:
"""
:param lead_id: - Lead ID.
:param secret: - Secret key from the lead testing interface.
:param uid:
:param aid:
:param test_mode:
:param force:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("start", params)
if return_raw_response:
return raw_result
result = LeadsStartResponse(**raw_result)
return result
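# Illustrative usage sketch (not part of the generated client). The
# `api_context` object and its `leads` attribute are assumptions for the
# example, not guaranteed vkwave API surface; only check_user's signature is
# taken from this module.
async def _example_check_user(api_context, lead_id: int):
    # Returns a typed LeadsCheckUserResponse unless return_raw_response=True.
    return await api_context.leads.check_user(lead_id=lead_id)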
| 33.2 | 169 | 0.607886 |
ee96a360468d9b8e5b145213aea158ecebf27e65 | 1,026 | py | Python | staicoin/util/vdf_prover.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | ["Apache-2.0"] | 1 | 2021-12-03T02:39:29.000Z | 2021-12-03T02:39:29.000Z | staicoin/util/vdf_prover.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | ["Apache-2.0"] | null | null | null | staicoin/util/vdf_prover.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | ["Apache-2.0"] | null | null | null |
from typing import Tuple
from chiavdf import prove
from staicoin.consensus.constants import ConsensusConstants
from staicoin.types.blockchain_format.classgroup import ClassgroupElement
from staicoin.types.blockchain_format.sized_bytes import bytes32
from staicoin.types.blockchain_format.vdf import VDFInfo, VDFProof
from staicoin.util.ints import uint8, uint64
def get_vdf_info_and_proof(
constants: ConsensusConstants,
vdf_input: ClassgroupElement,
challenge_hash: bytes32,
number_iters: uint64,
normalized_to_identity: bool = False,
) -> Tuple[VDFInfo, VDFProof]:
form_size = ClassgroupElement.get_size(constants)
result: bytes = prove(
bytes(challenge_hash),
vdf_input.data,
constants.DISCRIMINANT_SIZE_BITS,
number_iters,
)
output = ClassgroupElement.from_bytes(result[:form_size])
proof_bytes = result[form_size : 2 * form_size]
return VDFInfo(challenge_hash, number_iters, output), VDFProof(uint8(0), proof_bytes, normalized_to_identity)
| 34.2 | 113 | 0.774854 |
426d25441e87deaa24024091fe7e0a0e622bd6e3 | 1,981 | py | Python | API/onepanman_api/pagination.py | CMS0503/CodeOnBoard | 2df8c9d934f6ffb05dbfbde329f84c66f2348618 | ["MIT"] | null | null | null | API/onepanman_api/pagination.py | CMS0503/CodeOnBoard | 2df8c9d934f6ffb05dbfbde329f84c66f2348618 | ["MIT"] | 12 | 2020-11-19T09:24:02.000Z | 2020-12-02T11:07:22.000Z | API/onepanman_api/pagination.py | CMS0503/CodeOnBoard | 2df8c9d934f6ffb05dbfbde329f84c66f2348618 | ["MIT"] | null | null | null |
from rest_framework import pagination
from rest_framework.response import Response
from rest_framework.utils.urls import remove_query_param, replace_query_param
class AppPagination(pagination.PageNumberPagination):
def get_paginated_response(self, data):
next_url = self.get_next_link()
previous_url = self.get_previous_link()
first_url = self.get_first_link()
last_url = self.get_last_link()
links = {
'current': self.get_current_link()
}
if next_url:
links.setdefault('next', next_url)
if previous_url:
links.setdefault('previous', previous_url)
if first_url and first_url != previous_url:
links.setdefault('first', first_url)
if last_url and last_url != next_url:
links.setdefault('last', last_url)
return Response({
'links': links,
'count': self.page.paginator.count,
'results': data,
})
def get_current_link(self):
url = self.request.build_absolute_uri()
if self.page.number == 1:
return remove_query_param(url, self.page_query_param)
return replace_query_param(
url,
self.page_query_param,
self.page.number,
)
def get_first_link(self):
if not self.page.has_previous():
return None
else:
url = self.request.build_absolute_uri()
return remove_query_param(url, self.page_query_param)
def get_last_link(self):
if not self.page.has_next():
return None
else:
url = self.request.build_absolute_uri()
return replace_query_param(
url,
self.page_query_param,
self.page.paginator.num_pages,
)
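# Illustrative sketch (not part of the original module): the JSON shape a page
# produced by AppPagination is expected to have, here for page 3 of a listing.
# The URLs and count are made-up example values; 'first'/'last' are omitted by
# the code above whenever they coincide with 'previous'/'next'.
def _example_paginated_payload():
    return {
        "links": {
            "current": "https://example.com/api/items/?page=3",
            "next": "https://example.com/api/items/?page=4",
            "previous": "https://example.com/api/items/?page=2",
            "first": "https://example.com/api/items/",
            "last": "https://example.com/api/items/?page=9",
        },
        "count": 180,
        "results": [],
    }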
class CodePagination(pagination.PageNumberPagination):
page_size = 15
page_query_param = 'page_size'
max_page_size = 1000
| 31.951613 | 77 | 0.614841 |
45f03a2ca6dabc16c3ade63a40d62313560cbc4d | 8,774 | py | Python | earth_enterprise/src/scons/getversion.py | art926/earthenterprise | e59a95a395fa0522920048dbadd82204407c15b1 | ["Apache-2.0"] | 1 | 2018-11-06T02:40:59.000Z | 2018-11-06T02:40:59.000Z | earth_enterprise/src/scons/getversion.py | pyx61198/earthenterprise | 628271e15c32d0faffc1e67d1df16ff9ec4a4336 | ["Apache-2.0"] | null | null | null | earth_enterprise/src/scons/getversion.py | pyx61198/earthenterprise | 628271e15c32d0faffc1e67d1df16ff9ec4a4336 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import argparse
import git
import sys
from datetime import datetime
def GetVersion(backupFile, label=''):
"""As getLongVersion(), but only return the leading *.*.* value."""
raw = GetLongVersion(backupFile, label)
final = raw.split("-")[0]
return final
def GetLongVersion(backupFile, label=''):
"""Create a detailed version string based on the state of
the software, as it exists in the repository."""
if open_gee_version.long_version_string:
return open_gee_version.long_version_string
if _CheckGitAvailable():
ret = _GitGeneratedLongVersion()
# Without git, must use the backup file to create a string.
else:
base = _ReadBackupVersionFile(backupFile)
ret = '-'.join([base, _GetDateString()])
# Append the label, if there is one.
if len(label):
ret = '.'.join([ret, label])
# Cache the long version string:
open_gee_version.long_version_string = ret
return ret
def _GitGeneratedLongVersion():
"""Take the raw information parsed by git, and use it to
generate an appropriate version string for GEE."""
raw = _GetCommitRawDescription()
# For tagged commits, return the tag itself
if _IsCurrentCommitTagged(raw):
return _VersionForTaggedHead(raw)
else:
return _VersionFromTagHistory(raw)
def _IsGitDescribeFirstParentSupported():
"""Checks whether --first-parent parameter is valid for the
version of git available"""
try:
repo = _GetRepository()
repo.git.describe('--first-parent')
return True
except git.exc.GitCommandError:
pass
return False
def _GetCommitRawDescription():
"""Returns description of current commit"""
args = ['--tags', '--match', '[0-9]*\.[0-9]*\.[0-9]*\-*']
if _IsGitDescribeFirstParentSupported():
args.insert(0, '--first-parent')
repo = _GetRepository()
raw = repo.git.describe(*args)
raw = raw.rstrip()
return raw
def _IsCurrentCommitTagged(raw):
"""True if the current commit is tagged, otherwise False"""
# If this condition hits, then we are currently on a tagged commit.
return (len(raw.split("-")) < 4)
def _VersionForTaggedHead(raw):
"""When we're on the tagged commit, the version string is
either the tag itself (when repo is clean), or the tag with
date appended (when repo has uncommitted changes)"""
if _CheckDirtyRepository():
# Append the date if the repo contains uncommitted files
return '.'.join([raw, _GetDateString()])
return raw
def _VersionFromTagHistory(raw):
"""From the HEAD revision, this function finds the most recent
reachable version tag and returns a string representing the
version being built -- which is one version beyond the latest
found in the history."""
# Tear apart the information in the version string.
components = _ParseRawVersionString(raw)
# Determine how to update, since we are *not* on tagged commit.
if components['isFinal']:
components['patch'] = 0
components['patchType'] = "alpha"
components['revision'] = components['revision'] + 1
else:
components['patch'] = components['patch'] + 1
# Rebuild.
base = '.'.join([str(components[x]) for x in ("major", "minor", "revision")])
patch = '.'.join([str(components["patch"]), components["patchType"], _GetDateString()])
if not _CheckDirtyRepository():
patch = '.'.join([patch, components['hash']])
return '-'.join([base, patch])
def _ParseRawVersionString(raw):
"""Break apart a raw version string into its various components,
and return those entries via a dictionary."""
components = { }
# major.minor.revision-patch[.patchType][-commits][-hash]
rawComponents = raw.split("-")
base = rawComponents[0]
patchRaw = '' if not len(rawComponents) > 1 else rawComponents[1]
components['commits'] = -1 if not len(rawComponents) > 2 else rawComponents[2]
components['hash'] = None if not len(rawComponents) > 3 else rawComponents[3]
# Primary version (major.minor.revision)
baseComponents = base.split(".")
components['major'] = int(baseComponents[0])
components['minor'] = int(baseComponents[1])
components['revision'] = int(baseComponents[2])
# Patch (patch[.patchType])
components['isFinal'] = ((patchRaw[-5:] == "final") or
(patchRaw[-7:] == "release"))
patchComponents = patchRaw.split(".")
components['patch'] = int(patchComponents[0])
components['patchType'] = 'alpha' if not len(patchComponents) > 1 else patchComponents[1]
repo = _GetRepository()
return components
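# Illustrative sketch (not part of the original module): how a raw
# "git describe" string breaks down under _ParseRawVersionString. The tag below
# is a made-up example, and running this requires a git checkout because the
# helper also opens the repository.
def _parse_raw_version_example():
    # "5.3.2-1.alpha-4-g1a2b3c4" means 4 commits past the tag "5.3.2-1.alpha".
    components = _ParseRawVersionString("5.3.2-1.alpha-4-g1a2b3c4")
    # components["major"], ["minor"], ["revision"]  -> 5, 3, 2
    # components["patch"] -> 1, components["patchType"] -> "alpha"
    # components["commits"] -> "4", components["hash"] -> "g1a2b3c4"
    # components["isFinal"] is False, so _VersionFromTagHistory bumps patch to 2.
    return components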
def _CheckGitAvailable():
"""Try the most basic of git commands, to see if there is
currently any access to a repository."""
try:
repo = _GetRepository()
except git.exc.InvalidGitRepositoryError:
return False
return True
def _GetRepository():
"""Get a reference to the Git Repository.
Is there a cleaner option than searching from the current location?"""
# The syntax is different between library versions (particularly,
# those used by Centos 6 vs Centos 7).
try:
return git.Repo('.', search_parent_directories=True)
except TypeError:
return git.Repo('.')
def _CheckDirtyRepository():
"""Check to see if the repository is not in a cleanly committed state."""
repo = _GetRepository()
str = repo.git.status("--porcelain")
return (len(str) > 0)
def _ReadBackupVersionFile(target):
"""There should be a file checked in with the latest version
information available; if git isn't available to provide
information, then use this file instead."""
with open(target, 'r') as fp:
line = fp.readline()
return line
def _GetDateString():
"""Returns formatted date string representing current UTC time"""
return datetime.utcnow().strftime("%Y%m%d%H%M")
class OpenGeeVersion(object):
"""A class for storing Open GEE version information."""
def __init__(self):
# Cache version strings:
self.short_version_string = None
self.long_version_string = None
# Default parameter for GetVersion functions
self_path, _ = os.path.split(os.path.realpath(__file__))
self.backup_file = os.path.join(self_path, '..', 'version.txt')
self.label = ''
def get_short(self):
"""Returns the short version string."""
if not self.short_version_string:
self.short_version_string = self.get_long().split("-")[0]
return self.short_version_string
def set_short(self, value):
"""Overrides the short version string by using the given value."""
self.short_version_string = value
def get_long(self):
"""Returns the short version string."""
if not self.long_version_string:
self.long_version_string = GetLongVersion(self.backup_file, self.label)
return self.long_version_string
def set_long(self, value):
"""Overrides the long version string by using the given value.
Overriding the long version string would indirectly override the short
version string, as well, unless the former is also overridden.
"""
self.long_version_string = value
def get_warning_message(self):
"""Returns None, or a string describing known issues."""
return None if _IsGitDescribeFirstParentSupported() else '''\
WARNING: Git version 1.8.4 or later is required to correctly determine the Open GEE version being built.
The Open GEE version is calculated from tags using the "git describe" command.
The "--first-parent" parameter introduced in Git 1.8.4 allows proper version calcuation on all branches.
Without the --first-parent parameter, the version calculated may be incorrect, depending on which branch is being built.
For information on upgrading Git, see:
https://github.com/google/earthenterprise/wiki/Frequently-Asked-Questions-(FAQs)#how-do-i-upgrade-git-to-the-recommended-version-for-building-google-earth-enterprise\
'''
# Exported variable for use by other modules:
open_gee_version = OpenGeeVersion()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--long", action="store_true", help="Output long format of version string")
args = parser.parse_args()
print open_gee_version.get_long() if args.long else open_gee_version.get_short()
warning_message = open_gee_version.get_warning_message()
if warning_message is not None:
print >> sys.stderr, warning_message
__all__ = ['open_gee_version']
if __name__ == "__main__":
main()
| 31.67509 | 166 | 0.676772 |
6c4370a9e23c5c57b35b74a67feebae246467bb2 | 15,172 | py | Python | pytype/tests/test_typevar.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | ["Apache-2.0"] | null | null | null | pytype/tests/test_typevar.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | ["Apache-2.0"] | null | null | null | pytype/tests/test_typevar.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | ["Apache-2.0"] | null | null | null |
"""Tests for TypeVar."""
from pytype import file_utils
from pytype.tests import test_base
class TypeVarTest(test_base.TargetIndependentTest):
"""Tests for TypeVar."""
def test_unused_typevar(self):
ty = self.Infer("""
from typing import TypeVar
T = TypeVar("T")
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import TypeVar
T = TypeVar("T")
""")
def test_import_typevar(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """T = TypeVar("T")""")
ty = self.Infer("""
from a import T
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import TypeVar
T = TypeVar("T")
""")
def test_invalid_typevar(self):
ty, errors = self.InferWithErrors("""
from typing import TypeVar
typevar = TypeVar
T = typevar() # invalid-typevar[e1]
T = typevar("T") # ok
T = typevar(42) # invalid-typevar[e2]
T = typevar(str()) # invalid-typevar[e3]
T = typevar("T", str, int if __random__ else float) # invalid-typevar[e4]
T = typevar("T", 0, float) # invalid-typevar[e5]
T = typevar("T", str) # invalid-typevar[e6]
# pytype: disable=not-supported-yet
S = typevar("S", covariant=False) # ok
T = typevar("T", covariant=False) # duplicate ok
# pytype: enable=not-supported-yet
""")
self.assertTypesMatchPytd(ty, """
from typing import TypeVar
typevar = ... # type: type
S = TypeVar("S")
T = TypeVar("T")
""")
self.assertErrorRegexes(errors, {
"e1": r"wrong arguments", "e2": r"Expected.*str.*Actual.*int",
"e3": r"constant str", "e4": r"constraint.*Must be constant",
"e5": r"Expected.*_1:.*type.*Actual.*_1: int", "e6": r"0 or more than 1"
})
def test_print_constraints(self):
ty = self.Infer("""
from typing import List, TypeVar
S = TypeVar("S", int, float, covariant=True) # pytype: disable=not-supported-yet
T = TypeVar("T", int, float)
U = TypeVar("U", List[int], List[float])
""", deep=False)
# The "covariant" keyword is ignored for now.
self.assertTypesMatchPytd(ty, """
from typing import List, TypeVar
S = TypeVar("S", int, float)
T = TypeVar("T", int, float)
U = TypeVar("U", List[int], List[float])
""")
def test_infer_typevars(self):
ty = self.Infer("""
def id(x):
return x
def wrap_tuple(x, y):
return (x, y)
def wrap_list(x, y):
return [x, y]
def wrap_dict(x, y):
return {x: y}
def return_second(x, y):
return y
""")
self.assertTypesMatchPytd(ty, """
from typing import Dict, List, Tuple, Union
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
def id(x: _T0) -> _T0: ...
def wrap_tuple(x: _T0, y: _T1) -> Tuple[_T0, _T1]: ...
def wrap_list(x: _T0, y: _T1) -> List[Union[_T0, _T1]]: ...
def wrap_dict(x: _T0, y: _T1) -> Dict[_T0, _T1]: ...
def return_second(x, y: _T1) -> _T1: ...
""")
def test_infer_union(self):
ty = self.Infer("""
def return_either(x, y):
return x or y
def return_arg_or_42(x):
return x or 42
""")
self.assertTypesMatchPytd(ty, """
from typing import Union
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
def return_either(x: _T0, y: _T1) -> Union[_T0, _T1]: ...
def return_arg_or_42(x: _T0) -> Union[_T0, int]: ...
""")
def test_typevar_in_type_comment(self):
self.InferWithErrors("""
from typing import List, TypeVar
T = TypeVar("T")
x = None # type: T # not-supported-yet
y = None # type: List[T] # not-supported-yet
""")
def test_base_class_with_typevar(self):
ty = self.Infer("""
from typing import List, TypeVar
T = TypeVar("T")
class A(List[T]): pass
""")
self.assertTypesMatchPytd(ty, """
from typing import List, TypeVar
T = TypeVar("T")
class A(List[T]): ...
""")
def test_overwrite_base_class_with_typevar(self):
self.Check("""
from typing import List, TypeVar
T = TypeVar("T")
l = List[T]
l = list
class X(l): pass
""")
def test_bound(self):
self.InferWithErrors("""
from typing import TypeVar
T = TypeVar("T", int, float, bound=str) # invalid-typevar
S = TypeVar("S", bound="") # invalid-typevar
U = TypeVar("U", bound=str) # ok
V = TypeVar("V", bound=int if __random__ else float) # invalid-typevar
""")
def test_covariant(self):
_, errors = self.InferWithErrors("""
from typing import TypeVar
T = TypeVar("T", covariant=True) # not-supported-yet
S = TypeVar("S", covariant=42) # invalid-typevar[e1]
U = TypeVar("U", covariant=True if __random__ else False) # invalid-typevar[e2]
""")
self.assertErrorRegexes(
errors, {"e1": r"Expected.*bool.*Actual.*int", "e2": r"constant"})
def test_contravariant(self):
_, errors = self.InferWithErrors("""
from typing import TypeVar
T = TypeVar("T", contravariant=True) # not-supported-yet
S = TypeVar("S", contravariant=42) # invalid-typevar[e1]
U = TypeVar("U", contravariant=True if __random__ else False) # invalid-typevar[e2]
""")
self.assertErrorRegexes(
errors, {"e1": r"Expected.*bool.*Actual.*int", "e2": r"constant"})
def test_dont_propagate_pyval(self):
# in functions like f(x: T) -> T, if T has constraints we should not copy
# the value of constant types between instances of the typevar.
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar
AnyInt = TypeVar('AnyInt', int)
def f(x: AnyInt) -> AnyInt: ...
""")
ty = self.Infer("""
import a
if a.f(0):
x = 3
if a.f(1):
y = 3
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
x = ... # type: int
y = ... # type: int
""")
def test_property_type_param(self):
# We should allow property signatures of the form f(self: T) -> X[T]
# without complaining about the class not being parametrised over T
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar, List
T = TypeVar('T')
class A(object):
@property
def foo(self: T) -> List[T]: ...
class B(A): ...
""")
ty = self.Infer("""
import a
x = a.A().foo
y = a.B().foo
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import List
import a
a = ... # type: module
x = ... # type: List[a.A]
y = ... # type: List[a.B]
""")
def test_property_type_param2(self):
# Test for classes inheriting from Generic[X]
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar, List, Generic
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[U]):
@property
def foo(self: T) -> List[T]: ...
class B(A, Generic[U]): ...
def make_A() -> A[int]: ...
def make_B() -> B[int]: ...
""")
ty = self.Infer("""
import a
x = a.make_A().foo
y = a.make_B().foo
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import List
import a
a = ... # type: module
x = ... # type: List[a.A[int]]
y = ... # type: List[a.B[int]]
""")
# Skipping due to b/66005735
@test_base.skip("Type parameter bug")
def test_property_type_param3(self):
# Don't mix up the class parameter and the property parameter
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar, List, Generic
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[U]):
@property
def foo(self: T) -> List[U]: ...
def make_A() -> A[int]: ...
""")
ty = self.Infer("""
import a
x = a.make_A().foo
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
x = ... # type: List[int]
""")
def test_property_type_param_with_constraints(self):
# Test setting self to a constrained type
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar, List, Generic
T = TypeVar('T')
U = TypeVar('U', int, str)
X = TypeVar('X', int)
class A(Generic[U]):
@property
def foo(self: A[X]) -> List[X]: ...
def make_A() -> A[int]: ...
""")
ty = self.Infer("""
import a
x = a.make_A().foo
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import List
a = ... # type: module
x = ... # type: List[int]
""")
def test_classmethod_type_param(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar, List, Type
T = TypeVar('T')
class A(object):
@classmethod
def foo(self: Type[T]) -> List[T]: ...
class B(A): ...
""")
ty = self.Infer("""
import a
v = a.A.foo()
w = a.B.foo()
x = a.A().foo()
y = a.B().foo()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import List
import a
a = ... # type: module
v = ... # type: List[a.A]
w = ... # type: List[a.B]
x = ... # type: List[a.A]
y = ... # type: List[a.B]
""")
def test_metaclass_property_type_param(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar, Type, List
T = TypeVar('T')
class Meta():
@property
def foo(self: Type[T]) -> List[T]: ...
class A(metaclass=Meta):
pass
""")
ty = self.Infer("""
import a
x = a.A.foo
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import List
import a
a = ... # type: module
x = ... # type: List[a.A]
""")
def test_top_level_union(self):
ty = self.Infer("""
from typing import TypeVar
if __random__:
T = TypeVar("T")
else:
T = 42
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
T = ... # type: Any
""")
def test_store_typevar_in_dict(self):
"""Convert a typevar to Any when stored as a dict value."""
# See abstract.Dict.setitem_slot for why this is needed.
ty = self.Infer("""
from typing import TypeVar
T = TypeVar("T")
a = {'key': T}
""")
self.assertTypesMatchPytd(ty, """
from typing import Any, Dict, TypeVar
a = ... # type: Dict[str, Any]
T = TypeVar('T')
""")
def test_late_bound(self):
_, errors = self.InferWithErrors("""
from typing import TypeVar, Union
T = TypeVar("T", int, float, bound="str") # invalid-typevar[e1]
S = TypeVar("S", bound="") # invalid-typevar[e2]
U = TypeVar("U", bound="str") # ok
V = TypeVar("V", bound="int if __random__ else float") # invalid-typevar[e3]
W = TypeVar("W", bound="Foo") # ok, forward reference
X = TypeVar("X", bound="Bar") # name-error[e4]
class Foo:
pass
""")
self.assertErrorRegexes(errors, {
"e1": r"mutually exclusive", "e2": r"empty string",
"e3": r"Must be constant", "e4": r"Name.*Bar"})
def test_late_constraints(self):
ty = self.Infer("""
from typing import List, TypeVar
S = TypeVar("S", int, float)
T = TypeVar("T", "int", "float")
U = TypeVar("U", "List[int]", List[float])
V = TypeVar("V", "Foo", "List[Foo]")
class Foo:
pass
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import List, TypeVar
S = TypeVar("S", int, float)
T = TypeVar("T", int, float)
U = TypeVar("U", List[int], List[float])
V = TypeVar("V", Foo, List[Foo])
class Foo:
pass
""")
def test_typevar_in_alias(self):
ty = self.Infer("""
from typing import TypeVar, Union
T = TypeVar("T", int, float)
Num = Union[T, complex]
x = 10 # type: Num[int]
""")
self.assertTypesMatchPytd(ty, """
from typing import Any, TypeVar, Union
T = TypeVar("T", int, float)
Num: Any
x: Union[int, complex] = ...
""")
def test_recursive_alias(self):
errors = self.CheckWithErrors("""
from typing import Any, Iterable, TypeVar, Union
T = TypeVar("T")
X = Union[Any, Iterable["X"]] # not-supported-yet[e]
Y = Union[Any, X]
""")
self.assertErrorRegexes(errors, {"e": r"Recursive.*X"})
def test_type_of_typevar(self):
self.Check("""
from typing import Sequence, TypeVar
T = TypeVar('T')
def f(x): # type: (Sequence[T]) -> Sequence[T]
print(type(x))
return x
""")
def test_type_of_typevar_error(self):
errors = self.CheckWithErrors("""
from typing import Sequence, Type, TypeVar
T = TypeVar('T')
def f(x): # type: (int) -> int
return x
def g(x): # type: (Sequence[T]) -> Type[Sequence[T]]
return f(type(x)) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": "Expected.*int.*Actual.*Sequence"})
def test_typevar_in_constant(self):
ty = self.Infer("""
from typing import TypeVar
T = TypeVar('T')
class Foo(object):
def __init__(self):
self.f1 = self.f2
def f2(self, x):
# type: (T) -> T
return x
""")
self.assertTypesMatchPytd(ty, """
from typing import Callable, TypeVar
T = TypeVar('T')
class Foo:
f1: Callable[[T], T]
def __init__(self) -> None: ...
def f2(self, x: T) -> T: ...
""")
def test_extra_arguments(self):
_, errors = self.InferWithErrors("""
from typing import TypeVar
T = TypeVar("T", extra_arg=42) # invalid-typevar[e1]
S = TypeVar("S", *__any_object__) # invalid-typevar[e2]
U = TypeVar("U", **__any_object__) # invalid-typevar[e3]
""")
self.assertErrorRegexes(errors, {
"e1": r"extra_arg", "e2": r"\*args", "e3": r"\*\*kwargs"})
def test_simplify_args_and_kwargs(self):
ty = self.Infer("""
from typing import TypeVar
constraints = (int, str)
kwargs = {"covariant": True}
T = TypeVar("T", *constraints, **kwargs) # pytype: disable=not-supported-yet
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import Dict, Tuple, Type, TypeVar
T = TypeVar("T", int, str)
constraints = ... # type: Tuple[Type[int], Type[str]]
kwargs = ... # type: Dict[str, bool]
""")
test_base.main(globals(), __name__ == "__main__")
| 30.774848 | 90 | 0.546797 |
c22e034d458d18eae13425ea65f80c98ee5f6afc | 3,185 | py | Python | organizing/efficient_storage.py | Elaech/Fruits-262 | a03052473402fcff4adddd65517e34583e9d9c73 | ["MIT"] | 1 | 2021-06-25T17:53:31.000Z | 2021-06-25T17:53:31.000Z | organizing/efficient_storage.py | Elaech/Fruits-262 | a03052473402fcff4adddd65517e34583e9d9c73 | ["MIT"] | null | null | null | organizing/efficient_storage.py | Elaech/Fruits-262 | a03052473402fcff4adddd65517e34583e9d9c73 | ["MIT"] | null | null | null |
import numpy as np
import os
import json
import PIL.Image as Image
import time
import sys
label_to_number_dict = None
number_to_label_dict = None
def load_dictionaries():
"""
Loads the dictionaries that map the name of the fruit to a number and vice versa
"""
global label_to_number_dict
global number_to_label_dict
with open("labels/label_to_number_dict.json", "r") as f:
label_to_number_dict = json.load(f)
with open("labels/number_to_label_dict.json", "r") as f:
number_to_label_dict = json.load(f)
def get_label(path_to_image):
"""
:param path_to_image: path to the selected image
:return: the number representing the label for the said image
"""
return label_to_number_dict[os.path.basename(os.path.dirname(path_to_image))]
def build_dataset_parts(json_path, save_path, part_max_size):
"""
Builds numpy arrays of the images and their labels
Stores them on the HDD/SSD/etc in multiple parts as npz format
Stores the images and labels separately but in the same order
At the end stores the last part of data even it is smaller than the rest
    :param json_path: path to a json file containing the list of image paths
:param save_path: path to save the npz files
:param part_max_size: max bytes (on storage device) for each part
"""
part_index = 0
with open(json_path, "r") as input:
image_paths = json.load(input)
features = []
labels = []
current_mem_size = 0
for index in range(len(image_paths)):
# Print Progress
if index % 1000 == 0:
print(f"Processed images up to index {index}")
print(current_mem_size, " bytes")
# Append to the current part
img = np.array(Image.open(image_paths[index]))
features.append(img)
labels.append(get_label(image_paths[index]))
current_mem_size += sys.getsizeof(img)
# Save current part to disk and start a new one
if current_mem_size > part_max_size:
print(f"Saving to disk part {part_index} of {current_mem_size} bytes")
features = np.asarray(features)
labels = np.asarray(labels)
np.savez_compressed(save_path + '/features' + str(part_index) + ".npz", features)
np.savez_compressed(save_path + '/labels' + str(part_index) + ".npz", labels)
part_index += 1
del features
del labels
features = []
labels = []
current_mem_size = 0
    # If some data remains unsaved at the end then save it as the last part
    if features:
np.savez_compressed(save_path + '/features' + str(part_index) + ".npz", features)
np.savez_compressed(save_path + '/labels' + str(part_index) + ".npz", labels)
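# Illustrative sketch (not part of the original module): reading one saved part
# back. np.savez_compressed with a positional argument stores the array under
# the default key "arr_0"; the part index below is an example value.
def load_dataset_part(save_path, part_index=0):
    features = np.load(save_path + '/features' + str(part_index) + ".npz")["arr_0"]
    labels = np.load(save_path + '/labels' + str(part_index) + ".npz")["arr_0"]
    return features, labels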
if __name__ == '__main__':
load_dictionaries()
# Image Dimensions
WIDTH = 208
HEIGHT = 256
# Maximum Storage Bytes per Part
MAX_BYTES = 2147483648
x = time.time()
build_dataset_parts(f"paths/{WIDTH}x{HEIGHT}/train.json",
f"../DatasetBinaryStorage/{WIDTH}x{HEIGHT}/train",
MAX_BYTES)
print(time.time() - x)
| 34.619565 | 93 | 0.649608 |
96c7d6731e1840b6646521372208dfc056d8ef8d | 1,209 | py | Python | test/test_ad_group_ex.py | wangjoshuah/Amazon-Ads-Sponsored-Products-API-Python-Client | 98a511a0544d28aac06529c13f4921c19ae8ec66 | ["MIT"] | null | null | null | test/test_ad_group_ex.py | wangjoshuah/Amazon-Ads-Sponsored-Products-API-Python-Client | 98a511a0544d28aac06529c13f4921c19ae8ec66 | ["MIT"] | null | null | null | test/test_ad_group_ex.py | wangjoshuah/Amazon-Ads-Sponsored-Products-API-Python-Client | 98a511a0544d28aac06529c13f4921c19ae8ec66 | ["MIT"] | null | null | null |
"""
Amazon Ads API - Sponsored Products
Use the Amazon Ads API for Sponsored Products for campaign, ad group, keyword, negative keyword, and product ad management operations. For more information about Sponsored Products, see the [Sponsored Products Support Center](https://advertising.amazon.com/help?entityId=ENTITY3CWETCZD9HEG2#GWGFKPEWVWG2CLUJ). For onboarding information, see the [account setup](setting-up/account-setup) topic.<br/><br/> # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import amazon_ads_sponsored_products_client
from amazon_ads_sponsored_products_client.model.state import State
globals()['State'] = State
from amazon_ads_sponsored_products_client.model.ad_group_ex import AdGroupEx
class TestAdGroupEx(unittest.TestCase):
"""AdGroupEx unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAdGroupEx(self):
"""Test AdGroupEx"""
# FIXME: construct object with mandatory attributes with example values
# model = AdGroupEx() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 31.815789 | 423 | 0.735318 |
d4bc23bd8b04ecaf7bb26780a60043b79251b408 | 2,548 | py | Python | uai/operation/stop_uaiservice/stop_uaiservice.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | ["Apache-2.0"] | 38 | 2017-04-26T04:00:09.000Z | 2022-02-10T02:51:05.000Z | uai/operation/stop_uaiservice/stop_uaiservice.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | ["Apache-2.0"] | 17 | 2017-11-20T20:47:09.000Z | 2022-02-09T23:48:46.000Z | uai/operation/stop_uaiservice/stop_uaiservice.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | ["Apache-2.0"] | 28 | 2017-07-08T05:23:13.000Z | 2020-08-18T03:12:27.000Z |
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from uai.utils.utils import parse_unrequired_args
from uai.operation.base_operation import BaseUaiServiceOp
from uai.api.stop_uai_service import StopUAIServiceApiOp
class UaiServiceStopServiceOp(BaseUaiServiceOp):
"""
Base Stop Service Tool with UAI
"""
def __init__(self, parser):
super(UaiServiceStopServiceOp, self).__init__(parser)
def __add_stop_args(self, stop_parser):
stop_parse = stop_parser.add_argument_group(
'Stop-Params', 'Stop Parameters, help to stop service'
)
stop_parse.add_argument(
'--service_id',
type=str,
required=True,
help='the service id of UAI Inference'
)
stop_parse.add_argument(
'--srv_version',
type=str,
required=False,
            help='the service version of UAI Inference'
)
def _add_args(self):
super(UaiServiceStopServiceOp, self)._add_args()
self.__add_stop_args(self.parser)
def _parse_stop_args(self, args):
self.service_id = args['service_id']
self.srv_version = parse_unrequired_args('srv_version', args)
def _parse_args(self, args):
super(UaiServiceStopServiceOp, self)._parse_args(args)
self._parse_stop_args(args)
def cmd_run(self, args):
self._parse_args(args)
stopOp = StopUAIServiceApiOp(
public_key=self.public_key,
private_key=self.private_key,
project_id=self.project_id,
region=self.region,
zone=self.zone,
service_id=self.service_id,
srv_version=self.srv_version,
)
succ, rsp = stopOp.call_api()
if not succ:
raise RuntimeError('Call StopUAIService error, Error message: {0}'.format(rsp['Message']))
return succ, rsp
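# Illustrative usage sketch (not part of the original uai-sdk source): how this stop
# operation is typically driven. The argument values are placeholders, and the exact
# argparse wiring performed by the Base*Op classes is assumed rather than shown.
def _example_stop_service(parser):
    op = UaiServiceStopServiceOp(parser)
    args = {
        'service_id': 'uai-example-service',   # hypothetical service id
        'srv_version': '',                     # optional: stop a single version only
        # ...plus the account/region fields consumed by BaseUaiServiceOp._parse_args()
    }
    return op.cmd_run(args)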
| 34.432432 | 103 | 0.64325 |
dc5b90196edc767b44e8c2725745feb2046844b8 | 1,811 | py | Python | sdks/python/apache_beam/examples/complete/estimate_pi_test.py | ravwojdyla/beam | fbcde4cdc7d68de8734bf540c079b2747631a854 | [
"Apache-2.0"
] | 2 | 2017-02-22T03:35:11.000Z | 2017-04-05T09:38:16.000Z | sdks/python/apache_beam/examples/complete/estimate_pi_test.py | kavyasmj/beam0.6 | d59dfeb339bd56feb7569531e5c421a297b0d3dc | [
"Apache-2.0"
] | 2 | 2017-04-24T20:32:25.000Z | 2022-03-29T12:59:55.000Z | sdks/python/apache_beam/examples/complete/estimate_pi_test.py | kavyasmj/beam0.6 | d59dfeb339bd56feb7569531e5c421a297b0d3dc | [
"Apache-2.0"
] | 2 | 2019-03-04T02:12:46.000Z | 2021-08-10T20:29:37.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the estimate_pi example."""
import logging
import unittest
from apache_beam.examples.complete import estimate_pi
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.util import assert_that
from apache_beam.transforms.util import BeamAssertException
def in_between(lower, upper):
def _in_between(actual):
_, _, estimate = actual[0]
if estimate < lower or estimate > upper:
raise BeamAssertException(
'Failed assert: %f not in [%f, %f]' % (estimate, lower, upper))
return _in_between
class EstimatePiTest(unittest.TestCase):
def test_basics(self):
p = TestPipeline()
result = p | 'Estimate' >> estimate_pi.EstimatePiTransform(5000)
# Note: Probabilistically speaking this test can fail with a probability
# that is very small (VERY) given that we run at least 500 thousand trials.
assert_that(result, in_between(3.125, 3.155))
p.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 34.169811 | 79 | 0.751518 |
f4bab4bb9fa07022dbbcf99b12ebbe2dc0358599 | 10,555 | py | Python | perfkitbenchmarker/linux_packages/cassandra.py | justinuang/PerfKitBenchmarker | 0730a7a6ebcd9447c7667ff6a3902c203d85fadb | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/cassandra.py | justinuang/PerfKitBenchmarker | 0730a7a6ebcd9447c7667ff6a3902c203d85fadb | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/cassandra.py | justinuang/PerfKitBenchmarker | 0730a7a6ebcd9447c7667ff6a3902c203d85fadb | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installs/Configures Cassandra.
See 'perfkitbenchmarker/data/cassandra/' for configuration files used.
Cassandra homepage: http://cassandra.apache.org
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import posixpath
import time
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import os_types
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import INSTALL_DIR
from perfkitbenchmarker.linux_packages.ant import ANT_HOME_DIR
from six.moves import range
JNA_JAR_URL = ('https://maven.java.net/content/repositories/releases/'
'net/java/dev/jna/jna/4.1.0/jna-4.1.0.jar')
CASSANDRA_GIT_REPRO = 'https://github.com/apache/cassandra.git'
CASSANDRA_VERSION = 'cassandra-2.1'
CASSANDRA_YAML_TEMPLATE = 'cassandra/cassandra.yaml.j2'
CASSANDRA_ENV_TEMPLATE = 'cassandra/cassandra-env.sh.j2'
CASSANDRA_DIR = posixpath.join(INSTALL_DIR, 'cassandra')
CASSANDRA_PID = posixpath.join(CASSANDRA_DIR, 'cassandra.pid')
CASSANDRA_OUT = posixpath.join(CASSANDRA_DIR, 'cassandra.out')
CASSANDRA_ERR = posixpath.join(CASSANDRA_DIR, 'cassandra.err')
NODETOOL = posixpath.join(CASSANDRA_DIR, 'bin', 'nodetool')
# Number of times to attempt to start the cluster.
CLUSTER_START_TRIES = 10
CLUSTER_START_SLEEP = 60
# Time, in seconds, to sleep between node starts.
NODE_START_SLEEP = 5
# for setting a maven repo with --cassandra_maven_repo_url
_MAVEN_REPO_PARAMS = """
artifact.remoteRepository.central: {0}
artifact.remoteRepository.apache: {0}
"""
FLAGS = flags.FLAGS
flags.DEFINE_integer('cassandra_replication_factor', 3, 'Num of replicas.')
flags.DEFINE_integer('cassandra_concurrent_reads', 32,
'Concurrent read requests each server accepts.')
# Partial list of known mirrors:
# https://repo.maven.apache.org/maven2/.meta/repository-metadata.xml
# See instructions for setting up own mirror:
# https://maven.apache.org/guides/mini/guide-mirror-settings.html
flags.DEFINE_boolean('cassandra_maven_repo_url', None,
'Optional maven repo mirror to use.')
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
for resource in (CASSANDRA_YAML_TEMPLATE,
CASSANDRA_ENV_TEMPLATE):
data.ResourcePath(resource)
def _Install(vm):
"""Installs Cassandra from a tarball."""
vm.Install('ant')
vm.Install('build_tools')
vm.Install('openjdk')
vm.Install('curl')
vm.RemoteCommand('cd {0}; git clone {1}; cd {2}; git checkout {3}'.format(
INSTALL_DIR, CASSANDRA_GIT_REPRO, CASSANDRA_DIR, CASSANDRA_VERSION))
if FLAGS.cassandra_maven_repo_url:
# sets maven repo properties in the build.properties
file_contents = _MAVEN_REPO_PARAMS.format(FLAGS.cassandra_maven_repo_url)
vm.RemoteCommand('echo "{}" > {}/build.properties'.format(
file_contents, CASSANDRA_DIR))
vm.RemoteCommand('cd {}; {}/bin/ant'.format(CASSANDRA_DIR, ANT_HOME_DIR))
# Add JNA
vm.RemoteCommand('cd {0} && curl -LJO {1}'.format(
posixpath.join(CASSANDRA_DIR, 'lib'),
JNA_JAR_URL))
def YumInstall(vm):
"""Installs Cassandra on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs Cassandra on the VM."""
_Install(vm)
def JujuInstall(vm, vm_group_name):
"""Installs the Cassandra charm on the VM."""
vm.JujuDeploy('cs:trusty/cassandra', vm_group_name)
# The charm defaults to Cassandra 2.2.x, which has deprecated
# cassandra-cli. Specify the sources to downgrade to Cassandra 2.1.x
# to match the cassandra benchmark(s) expectations.
sources = ['deb https://www.apache.org/dist/cassandra/debian 21x main',
'ppa:openjdk-r/ppa',
'ppa:stub/cassandra']
keys = ['F758CE318D77295D',
'null',
'null']
vm.JujuSet(
'cassandra',
[
# Allow authentication from all units
'authenticator=AllowAllAuthenticator',
'install_sources="[%s]"' %
', '.join(["'" + x + "'" for x in sources]),
'install_keys="[%s]"' % ', '.join(keys)
])
# Wait for cassandra to be installed and configured
vm.JujuWait()
for unit in vm.units:
# Make sure the cassandra/conf dir is created, since we're skipping
# the manual installation to /opt/pkb.
remote_path = posixpath.join(CASSANDRA_DIR, 'conf')
unit.RemoteCommand('mkdir -p %s' % remote_path)
def Configure(vm, seed_vms):
"""Configure Cassandra on 'vm'.
Args:
vm: VirtualMachine. The VM to configure.
seed_vms: List of VirtualMachine. The seed virtual machine(s).
"""
context = {'ip_address': vm.internal_ip,
'data_path': posixpath.join(vm.GetScratchDir(), 'cassandra'),
'seeds': ','.join(vm.internal_ip for vm in seed_vms),
'num_cpus': vm.NumCpusForBenchmark(),
'cluster_name': 'Test cluster',
'concurrent_reads': FLAGS.cassandra_concurrent_reads}
for config_file in [CASSANDRA_ENV_TEMPLATE, CASSANDRA_YAML_TEMPLATE]:
local_path = data.ResourcePath(config_file)
remote_path = posixpath.join(
CASSANDRA_DIR, 'conf',
os.path.splitext(os.path.basename(config_file))[0])
vm.RenderTemplate(local_path, remote_path, context=context)
def Start(vm):
"""Start Cassandra on a VM.
Args:
vm: The target vm. Should already be configured via 'Configure'.
"""
if vm.OS_TYPE == os_types.JUJU:
return
vm.RemoteCommand(
'nohup {0}/bin/cassandra -p "{1}" 1> {2} 2> {3} &'.format(
CASSANDRA_DIR, CASSANDRA_PID, CASSANDRA_OUT, CASSANDRA_ERR))
def Stop(vm):
"""Stops Cassandra on 'vm'."""
if vm.OS_TYPE == os_types.JUJU:
return
vm.RemoteCommand('kill $(cat {0})'.format(CASSANDRA_PID),
ignore_failure=True)
def IsRunning(vm):
"""Returns a boolean indicating whether Cassandra is running on 'vm'."""
cassandra_pid = vm.RemoteCommand(
'cat {0} || true'.format(CASSANDRA_PID))[0].strip()
if not cassandra_pid:
return False
try:
vm.RemoteCommand('kill -0 {0}'.format(cassandra_pid))
return True
except errors.VirtualMachine.RemoteCommandError:
logging.warn('%s: Cassandra is not running. '
'Startup STDOUT:\n%s\n\nSTDERR:\n%s',
vm,
vm.RemoteCommand('cat ' + CASSANDRA_OUT),
vm.RemoteCommand('cat ' + CASSANDRA_ERR))
return False
def CleanNode(vm):
"""Remove Cassandra data from 'vm'.
Args:
vm: VirtualMachine. VM to clean.
"""
if vm.OS_TYPE == os_types.JUJU:
return
data_path = posixpath.join(vm.GetScratchDir(), 'cassandra')
vm.RemoteCommand('rm -rf {0}'.format(data_path))
def _StartCassandraIfNotRunning(vm):
"""Starts Cassandra on 'vm' if not currently running."""
if not IsRunning(vm):
logging.info('Retrying starting cassandra on %s', vm)
Start(vm)
def GetCassandraCliPath(vm):
if vm.OS_TYPE == os_types.JUJU:
# Replace the stock CASSANDRA_CLI so that it uses the binary
# installed by the cassandra charm.
return '/usr/bin/cassandra-cli'
return posixpath.join(CASSANDRA_DIR, 'bin',
'cassandra-cli')
def GetCassandraStressPath(vm):
if vm.OS_TYPE == os_types.JUJU:
# Replace the stock CASSANDRA_STRESS so that it uses the binary
# installed by the cassandra-stress charm.
return '/usr/bin/cassandra-stress'
return posixpath.join(CASSANDRA_DIR, 'tools', 'bin',
'cassandra-stress')
def GetNumberOfNodesUp(vm):
"""Gets the number of VMs which are up in a Cassandra cluster.
Args:
vm: VirtualMachine. The VM to use to check the cluster status.
"""
vms_up = vm.RemoteCommand(
'{0} status | grep -c "^UN"'.format(NODETOOL))[0].strip()
return int(vms_up)
def StartCluster(seed_vm, vms):
"""Starts a Cassandra cluster.
Starts a Cassandra cluster, first starting 'seed_vm', then remaining VMs in
'vms'.
Args:
seed_vm: VirtualMachine. Machine which will function as the sole seed. It
will be started before all other VMs.
vms: list of VirtualMachines. VMs *other than* seed_vm which should be
started.
"""
if seed_vm.OS_TYPE == os_types.JUJU:
# Juju automatically configures and starts the Cassandra cluster.
return
vm_count = len(vms) + 1
# Cassandra setup
logging.info('Starting seed VM %s', seed_vm)
Start(seed_vm)
logging.info('Waiting %ds for seed to start', NODE_START_SLEEP)
time.sleep(NODE_START_SLEEP)
for i in range(5):
if not IsRunning(seed_vm):
logging.warn('Seed %s: Cassandra not running yet (try %d). Waiting %ds.',
seed_vm, i, NODE_START_SLEEP)
time.sleep(NODE_START_SLEEP)
else:
break
else:
raise ValueError('Cassandra failed to start on seed.')
if vms:
logging.info('Starting remaining %d nodes', len(vms))
# Start the VMs with a small pause in between each, to allow the node to
# join.
# Starting Cassandra nodes fails when multiple nodes attempt to join the
# cluster concurrently.
for i, vm in enumerate(vms):
time.sleep(NODE_START_SLEEP)
logging.info('Starting non-seed VM %d/%d.', i + 1, len(vms))
Start(vm)
logging.info('Waiting %ds for nodes to join', CLUSTER_START_SLEEP)
time.sleep(CLUSTER_START_SLEEP)
for i in range(CLUSTER_START_TRIES):
vms_up = GetNumberOfNodesUp(seed_vm)
if vms_up == vm_count:
logging.info('All %d nodes up!', vm_count)
break
logging.warn('Try %d: only %s of %s up. Restarting and sleeping %ds', i,
vms_up, vm_count, NODE_START_SLEEP)
vm_util.RunThreaded(_StartCassandraIfNotRunning, vms)
time.sleep(NODE_START_SLEEP)
else:
raise IOError('Failed to start Cassandra cluster.')
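# Illustrative only (not part of the original PerfKitBenchmarker module): the call
# sequence a benchmark typically follows with the helpers above. `seed_vm` and
# `other_vms` are hypothetical VirtualMachine objects provided by the framework.
def _example_bring_up_cluster(seed_vm, other_vms):
    for vm in [seed_vm] + other_vms:
        Configure(vm, seed_vms=[seed_vm])   # render cassandra.yaml / cassandra-env.sh
    StartCluster(seed_vm, other_vms)        # seed node first, then the remaining nodes
    return GetNumberOfNodesUp(seed_vm)      # should equal len(other_vms) + 1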
| 32.278287 | 79 | 0.696163 |
ee9276cfe1ae2654db19c95b8031bc378728abc6 | 425 | py | Python | ln/util.py | ksons/ln.py | ee3dcc3c77dd54d1cabb0a5d6d036fcd7803c4f8 | [
"MIT"
] | 2 | 2020-12-15T07:40:35.000Z | 2022-01-13T21:06:23.000Z | ln/util.py | ksons/ln.py | ee3dcc3c77dd54d1cabb0a5d6d036fcd7803c4f8 | [
"MIT"
] | null | null | null | ln/util.py | ksons/ln.py | ee3dcc3c77dd54d1cabb0a5d6d036fcd7803c4f8 | [
"MIT"
] | null | null | null | from pyrr import Vector3
import numpy as np
def vector_min(v1: Vector3, v2: Vector3):
return np.minimum(v1, v2)
def vector_max(v1: Vector3, v2: Vector3):
return np.maximum(v1, v2)
def vector_min_axis(v1: Vector3) -> Vector3:
x, y, z = [abs(v) for v in v1]
if x <= y and x <= z:
return Vector3([1, 0, 0])
if y <= x and y <= z:
return Vector3([0, 1, 0])
return Vector3([0, 0, 1])
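# Illustrative note (not part of the original module): vector_min_axis() returns the
# unit vector of the axis whose component has the smallest magnitude, e.g.
#   vector_min_axis(Vector3([5.0, -0.2, 3.0]))  # -> Vector3([0, 1, 0])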
| 20.238095 | 44 | 0.590588 |
cca90bdb7a48a74bde002ebca7ad1ef832f98d3a | 936 | py | Python | examples/ner/service/ner.py | aashish24/tangelo | fcaf2542d48904055558fa43c8d50533afc9ad71 | [
"Apache-2.0"
] | 1 | 2015-06-13T09:28:36.000Z | 2015-06-13T09:28:36.000Z | examples/ner/service/ner.py | aashish24/tangelo | fcaf2542d48904055558fa43c8d50533afc9ad71 | [
"Apache-2.0"
] | null | null | null | examples/ner/service/ner.py | aashish24/tangelo | fcaf2542d48904055558fa43c8d50533afc9ad71 | [
"Apache-2.0"
] | null | null | null | import cherrypy
import nltk
import nltk.chunk.named_entity
from tangelo import empty_response
# This service performs named entity recognition on input text.
def run(text=""):
# Create an empty result container.
response = empty_response();
response['result'] = [];
# If nothing passed in, return an empty result.
if text == "":
return response
# Otherwise, perform named entity recognition.
sentences = nltk.sent_tokenize(text)
chunks = [nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(s))) for s in sentences]
# Now find all tagged chunks that are not whole sentences - gather the leaves of such
# chunks into strings, and place them in the list of named entities.
for c in chunks:
for subtree in filter(lambda x: x.node != 'S', c.subtrees()):
response['result'].append( (subtree.node, ' '.join(map(lambda x: x[0], subtree.leaves())) ) )
return response
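# Illustrative only (not part of the original Tangelo service): a minimal local check
# of run(). The sample sentence and the labels in the comment are hypothetical; the
# actual labels depend on the NLTK models installed alongside this service.
def _example_run():
    resp = run("Barack Obama visited Paris last year.")
    # resp['result'] holds (label, text) pairs, e.g. ('PERSON', 'Barack Obama')
    return resp['result']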
| 33.428571 | 105 | 0.683761 |
c454b2fefc7bed57e38b1e3750ce8d380a6b90d7 | 256 | py | Python | problems/009.py | JoshKarpel/Euler | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | 1 | 2017-09-20T22:26:24.000Z | 2017-09-20T22:26:24.000Z | problems/009.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | problems/009.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | def solve():
target = 1000
for a in range(1, target):
for b in range(1, target - a):
c = target - a - b
if a ** 2 + b ** 2 == c ** 2:
return a * b * c
if __name__ == '__main__':
print(solve())
| 19.692308 | 41 | 0.425781 |
b3569e1a298c0cc9bc81c3a250174cfc2a7939ce | 12,880 | py | Python | client/v2_2/docker_session_.py | tungstenfabric-infra/containerregistry | 24e0565cf56b967a263423c08992426593dc7256 | [
"Apache-2.0"
] | null | null | null | client/v2_2/docker_session_.py | tungstenfabric-infra/containerregistry | 24e0565cf56b967a263423c08992426593dc7256 | [
"Apache-2.0"
] | null | null | null | client/v2_2/docker_session_.py | tungstenfabric-infra/containerregistry | 24e0565cf56b967a263423c08992426593dc7256 | [
"Apache-2.0"
] | 1 | 2019-08-01T05:22:36.000Z | 2019-08-01T05:22:36.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manages pushes to and deletes from a v2 docker registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import concurrent.futures
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_http
from containerregistry.client.v2_2 import docker_image
from containerregistry.client.v2_2 import docker_image_list as image_list
import httplib2
import six.moves.http_client
import six.moves.urllib.parse
def _tag_or_digest(name):
if isinstance(name, docker_name.Tag):
return name.tag
else:
assert isinstance(name, docker_name.Digest)
return name.digest
class Push(object):
"""Push encapsulates a Registry v2.2 Docker push session."""
def __init__(self,
protocol,
name,
creds,
transport,
mount = None,
threads = 1):
"""Constructor.
If multiple threads are used, the caller *must* ensure that the provided
transport is thread-safe, as well as the image that is being uploaded.
It is notable that tarfile and httplib2.Http in Python are NOT threadsafe.
Args:
name: the fully-qualified name of the tag to push
creds: credential provider for authorizing requests
transport: the http transport to use for sending requests
mount: list of repos from which to mount blobs.
threads: the number of threads to use for uploads.
Raises:
ValueError: an incorrectly typed argument was supplied.
"""
self._name = name
self._transport = docker_http.Transport(name, creds, transport,
docker_http.PUSH)
self._mount = mount
self._threads = threads
self._protocol = protocol
def _scheme_and_host(self):
return '{scheme}://{registry}'.format(
scheme = self._protocol,
registry=self._name.registry)
def _base_url(self):
return self._scheme_and_host() + '/v2/{repository}'.format(
repository=self._name.repository)
def _get_absolute_url(self, location):
# If 'location' is an absolute URL (includes host), this will be a no-op.
return six.moves.urllib.parse.urljoin(
base=self._scheme_and_host(), url=location)
def _blob_exists(self, digest):
"""Check the remote for the given layer."""
# HEAD the blob, and check for a 200
resp, unused_content = self._transport.Request(
'{base_url}/blobs/{digest}'.format(
base_url=self._base_url(), digest=digest),
method='HEAD',
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
])
return resp.status == six.moves.http_client.OK # pytype: disable=attribute-error
def _manifest_exists(
self, image
):
"""Check the remote for the given manifest by digest."""
# GET the manifest by digest, and check for 200
resp, unused_content = self._transport.Request(
'{base_url}/manifests/{digest}'.format(
base_url=self._base_url(), digest=image.digest()),
method='GET',
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
],
accepted_mimes=[image.media_type()])
return resp.status == six.moves.http_client.OK # pytype: disable=attribute-error
def _get_blob(self, image, digest):
if digest == image.config_blob():
return image.config_file().encode('utf8')
return image.blob(digest)
def _monolithic_upload(self, image,
digest):
self._transport.Request(
'{base_url}/blobs/uploads/?digest={digest}'.format(
base_url=self._base_url(), digest=digest),
method='POST',
body=self._get_blob(image, digest),
accepted_codes=[six.moves.http_client.CREATED])
def _add_digest(self, url, digest):
scheme, netloc, path, query_string, fragment = (
six.moves.urllib.parse.urlsplit(url))
qs = six.moves.urllib.parse.parse_qs(query_string)
qs['digest'] = [digest]
query_string = six.moves.urllib.parse.urlencode(qs, doseq=True)
return six.moves.urllib.parse.urlunsplit((scheme, netloc, path,
query_string, fragment))
def _put_upload(self, image, digest):
mounted, location = self._start_upload(digest, self._mount)
if mounted:
logging.info('Layer %s mounted.', digest)
return
location = self._add_digest(location, digest)
self._transport.Request(
location,
method='PUT',
body=self._get_blob(image, digest),
accepted_codes=[six.moves.http_client.CREATED])
# pylint: disable=missing-docstring
def _patch_upload(self, image,
digest):
mounted, location = self._start_upload(digest, self._mount)
if mounted:
logging.info('Layer %s mounted.', digest)
return
location = self._get_absolute_url(location)
resp, unused_content = self._transport.Request(
location,
method='PATCH',
body=self._get_blob(image, digest),
content_type='application/octet-stream',
accepted_codes=[
six.moves.http_client.NO_CONTENT, six.moves.http_client.ACCEPTED,
six.moves.http_client.CREATED
])
location = self._add_digest(resp['location'], digest)
location = self._get_absolute_url(location)
self._transport.Request(
location,
method='PUT',
body=None,
accepted_codes=[six.moves.http_client.CREATED])
def _put_blob(self, image, digest):
"""Upload the aufs .tgz for a single layer."""
# We have a few choices for unchunked uploading:
# POST to /v2/<name>/blobs/uploads/?digest=<digest>
# Fastest, but not supported by many registries.
# self._monolithic_upload(image, digest)
#
# or:
# POST /v2/<name>/blobs/uploads/ (no body*)
# PUT /v2/<name>/blobs/uploads/<uuid> (full body)
# Next fastest, but there is a mysterious bad interaction
# with Bintray. This pattern also hasn't been used in
# clients since 1.8, when they switched to the 3-stage
# method below.
# self._put_upload(image, digest)
# or:
# POST /v2/<name>/blobs/uploads/ (no body*)
# PATCH /v2/<name>/blobs/uploads/<uuid> (full body)
# PUT /v2/<name>/blobs/uploads/<uuid> (no body)
#
# * We attempt to perform a cross-repo mount if any repositories are
# specified in the "mount" parameter. This does a fast copy from a
# repository that is known to contain this blob and skips the upload.
self._patch_upload(image, digest)
def _remote_tag_digest(
self, image
):
"""Check the remote for the given manifest by digest."""
# GET the tag we're pushing
resp, unused_content = self._transport.Request(
'{base_url}/manifests/{tag}'.format(
base_url=self._base_url(),
tag=self._name.tag), # pytype: disable=attribute-error
method='GET',
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
],
accepted_mimes=[image.media_type()])
if resp.status == six.moves.http_client.NOT_FOUND: # pytype: disable=attribute-error
return None
return resp.get('docker-content-digest')
def _put_manifest(
self,
image,
use_digest = False):
"""Upload the manifest for this image."""
if use_digest:
tag_or_digest = image.digest()
else:
tag_or_digest = _tag_or_digest(self._name)
self._transport.Request(
'{base_url}/manifests/{tag_or_digest}'.format(
base_url=self._base_url(), tag_or_digest=tag_or_digest),
method='PUT',
body=image.manifest(),
content_type=image.media_type(),
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.CREATED,
six.moves.http_client.ACCEPTED
])
def _start_upload(self,
digest,
mount = None
):
"""POST to begin the upload process with optional cross-repo mount param."""
if not mount:
# Do a normal POST to initiate an upload if mount is missing.
url = '{base_url}/blobs/uploads/'.format(base_url=self._base_url())
accepted_codes = [six.moves.http_client.ACCEPTED]
else:
# If we have a mount parameter, try to mount the blob from another repo.
mount_from = '&'.join([
'from=' + six.moves.urllib.parse.quote(repo.repository, '')
for repo in self._mount
])
url = '{base_url}/blobs/uploads/?mount={digest}&{mount_from}'.format(
base_url=self._base_url(), digest=digest, mount_from=mount_from)
accepted_codes = [
six.moves.http_client.CREATED, six.moves.http_client.ACCEPTED
]
resp, unused_content = self._transport.Request(
url, method='POST', body=None, accepted_codes=accepted_codes)
# pytype: disable=attribute-error,bad-return-type
return resp.status == six.moves.http_client.CREATED, resp.get('location')
# pytype: enable=attribute-error,bad-return-type
def _upload_one(self, image, digest):
"""Upload a single layer, after checking whether it exists already."""
if self._blob_exists(digest):
logging.info('Layer %s exists, skipping', digest)
return
self._put_blob(image, digest)
logging.info('Layer %s pushed.', digest)
def upload(self,
image,
use_digest = False):
"""Upload the layers of the given image.
Args:
image: the image to upload.
use_digest: use the manifest digest (i.e. not tag) as the image reference.
"""
# If the manifest (by digest) exists, then avoid N layer existence
# checks (they must exist).
if self._manifest_exists(image):
if isinstance(self._name, docker_name.Tag):
if self._remote_tag_digest(image) == image.digest():
logging.info('Tag points to the right manifest, skipping push.')
return
logging.info('Manifest exists, skipping blob uploads and pushing tag.')
else:
logging.info('Manifest exists, skipping upload.')
elif isinstance(image, image_list.DockerImageList):
for _, child in image:
# TODO(user): Refactor so that the threadpool is shared.
with child:
self.upload(child, use_digest=True)
elif self._threads == 1:
for digest in image.distributable_blob_set():
self._upload_one(image, digest)
else:
with concurrent.futures.ThreadPoolExecutor(
max_workers=self._threads) as executor:
future_to_params = {
executor.submit(self._upload_one, image, digest): (image, digest)
for digest in image.distributable_blob_set()
}
for future in concurrent.futures.as_completed(future_to_params):
future.result()
# This should complete the upload by uploading the manifest.
self._put_manifest(image, use_digest=use_digest)
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, exception_type, unused_value, unused_traceback):
if exception_type:
logging.error('Error during upload of: %s', self._name)
return
logging.info('Finished upload of: %s', self._name)
# pylint: disable=invalid-name
def Delete(
name,
creds,
transport
):
"""Delete a tag or digest.
Args:
name: a tag or digest to be deleted.
creds: the creds to use for deletion.
transport: the transport to use to contact the registry.
"""
docker_transport = docker_http.Transport(name, creds, transport,
docker_http.DELETE)
_, unused_content = docker_transport.Request(
'{scheme}://{registry}/v2/{repository}/manifests/{entity}'.format(
scheme=docker_http.Scheme(name.registry),
registry=name.registry,
repository=name.repository,
entity=_tag_or_digest(name)),
method='DELETE',
accepted_codes=[six.moves.http_client.OK, six.moves.http_client.ACCEPTED])
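# Illustrative usage sketch (not part of the original containerregistry module): how a
# caller typically drives Push as a context manager. The tag string, credentials and
# transport objects below are placeholders supplied by the caller.
def _example_push(image, creds, transport):
    name = docker_name.Tag('gcr.io/example-project/example-image:latest')  # hypothetical
    with Push('https', name, creds, transport, threads=4) as session:
        session.upload(image)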
| 35.58011 | 89 | 0.659938 |
03f352d925c7a7c8feb27f10604cd1615b8e994f | 1,470 | py | Python | proxme/__main__.py | nbargnesi/proxme | c223c4d61ea1477bb56e7f1b6f2d0775c210acd5 | [
"MIT"
] | 1 | 2017-10-09T16:36:28.000Z | 2017-10-09T16:36:28.000Z | proxme/__main__.py | nbargnesi/proxme | c223c4d61ea1477bb56e7f1b6f2d0775c210acd5 | [
"MIT"
] | null | null | null | proxme/__main__.py | nbargnesi/proxme | c223c4d61ea1477bb56e7f1b6f2d0775c210acd5 | [
"MIT"
] | null | null | null | # coding: utf-8
"""Proxme main."""
import os
import sys
import proxme
import proxme.lib
import signal
import tornado
import tornado.options
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.log
from optparse import OptionParser
X_NS_PROXY_AUTOCONFIG = 'application/x-ns-proxy-autoconfig'
content = {
'proxy': '127.0.0.1:1080'
}
class Handler(tornado.web.RequestHandler):
def get(self, path, **kwargs):
path = './' + path
self.set_header('Content-Type', X_NS_PROXY_AUTOCONFIG)
if not os.path.isfile(path):
self.render('template.pac', content=content)
return
with open(path) as fh:
self.write(fh.read())
def main():
app = tornado.web.Application([
(r"/(.*)", Handler),
], autoreload=True)
tornado.log.enable_pretty_logging()
if len(sys.argv) == 1:
address = '127.0.0.1'
port = 8888
elif len(sys.argv) == 3:
address = sys.argv[1]
        port = int(sys.argv[2])
else:
raise RuntimeError('invalid arguments')
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(port, address=address)
print('Listening on %s:%d; ready for requests.' % (address, port))
def _int_handler(signal, frame):
print('Got SIGINT, shutting down.')
tornado.ioloop.IOLoop.current().stop()
signal.signal(signal.SIGINT, _int_handler)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
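# Illustrative note: template.pac itself is not part of this snapshot, but the proxy
# auto-config rendered by Handler would typically look like the following (an
# assumption based on the default `content` dict above, kept here only as a comment):
#
#   function FindProxyForURL(url, host) {
#       return "SOCKS5 127.0.0.1:1080; DIRECT";
#   }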
| 22.615385 | 70 | 0.638095 |
5adebb1e861b73d198820fef419ce237951884a8 | 23,914 | py | Python | clogd/app.py | zanardo/clog | 390e56dd8bab472532d5ff77d1dfb0acfdb72518 | [
"BSD-2-Clause"
] | null | null | null | clogd/app.py | zanardo/clog | 390e56dd8bab472532d5ff77d1dfb0acfdb72518 | [
"BSD-2-Clause"
] | null | null | null | clogd/app.py | zanardo/clog | 390e56dd8bab472532d5ff77d1dfb0acfdb72518 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2018, Antonio Zanardo <zanardo@gmail.com>
#
from bottle import route, view, get, post, redirect, \
response, request, static_file, abort
from functools import wraps
from hashlib import sha1
from uuid import uuid4
from base64 import b64decode
import os
import re
import yaml
import zlib
import random
import bottle
import os.path
import datetime
import subprocess
import zpgdb as db
from clogd import __VERSION__
from clogd.log import log
# Default configuration. The environment variable CLOGD_CONF should be
# declared before importing this module. This envvar should point to a yaml
# file containing a dictionary with keys and values that override the default
# configuration.
CONFIG = {
'pg_host': '127.0.0.1',
'pg_port': 5432,
'pg_db': 'clog_dev',
'pg_user': 'clog_dev',
'pg_pass': '**password**',
}
bottle.TEMPLATE_PATH.insert(
0, os.path.join(os.path.dirname(__file__), 'views')
)
# Bottle has a low limit for post data. Let's make it larger.
bottle.BaseRequest.MEMFILE_MAX = 10 * 1024 * 1024
def duration_to_human(seconds):
    ms = int((seconds - int(seconds)) * 1000)
    s = int(seconds) % 60
    m = (int(seconds) // 60) % 60
    h = int(seconds) // 3600
    return '%02d:%02d:%02d.%03d' % (h, m, s, ms)
def date_to_human(dt):
if dt is None:
return ''
return dt.strftime('%Y-%m-%d')
def getctx():
user = currentuser()
user_is_admin = userisadmin(user)
return dict(version=__VERSION__, username=user, isadmin=user_is_admin)
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
session_id = request.get_cookie('clog')
if not session_id or not validatesession(session_id):
return redirect('/login')
return f(*args, **kwargs)
return decorated
def requires_admin(f):
@wraps(f)
def decorated(*args, **kwargs):
session_id = request.get_cookie('clog')
if not session_id or not validatesession(session_id) or \
not userisadmin(currentuser()):
return 'not authorized'
return f(*args, **kwargs)
return decorated
def validateuserdb(user, passwd):
passwdsha1 = sha1(passwd).hexdigest()
with db.trans() as c:
c.execute('''
SELECT username
FROM users
WHERE username = %(user)s
AND password = %(passwdsha1)s
''', locals())
r = c.fetchone()
return bool(r)
def validatesession(session_id):
with db.trans() as c:
c.execute('''
SELECT session_id
FROM sessions
WHERE session_id = %(session_id)s
''', locals())
r = c.fetchone()
return bool(r)
def currentuser():
session_id = request.get_cookie('clog')
with db.trans() as c:
c.execute('''
SELECT username
FROM sessions
WHERE session_id = %(session_id)s
''', locals())
return c.fetchone()['username']
def userisadmin(username):
with db.trans() as c:
c.execute('''
SELECT is_admin
FROM users
WHERE username = %(username)s
''', locals())
return c.fetchone()['is_admin']
def removesession(session_id):
with db.trans() as c:
c.execute('''
DELETE FROM sessions
WHERE session_id = %(session_id)s
''', locals())
def makesession(user):
with db.trans() as c:
session_id = str(uuid4())
c.execute('''
INSERT INTO sessions
(session_id, username)
VALUES
(%(session_id)s, %(user)s)
''', locals())
return session_id
def get_job_id(computername, computeruser, script):
with db.trans() as c:
c.execute('''
SELECT id
FROM jobs
WHERE computername = %(computername)s
AND computeruser = %(computeruser)s
AND script = %(script)s
''', locals())
r = c.fetchone()
if not r:
return None
else:
return r[0]
@get('/admin')
@view('admin')
@requires_auth
@requires_admin
def admin():
users = []
with db.trans() as c:
c.execute('''
SELECT username, is_admin
FROM users
ORDER BY username
''')
for user in c:
user = dict(user)
users.append(user)
return dict(ctx=getctx(), users=users)
@get('/admin/remove-user/:username')
@requires_auth
@requires_admin
def removeuser(username):
if username == currentuser():
return 'cant remove current user!'
with db.trans() as c:
c.execute('''
DELETE FROM sessions
WHERE username = %(username)s
''', locals())
c.execute('''
DELETE FROM users
WHERE username = %(username)s
''', locals())
return redirect('/admin')
@post('/admin/save-new-user')
@requires_auth
@requires_admin
def newuser():
username = request.forms.username
if username.strip() == '':
return 'invalid user!'
password = str(int(random.random() * 999999))
sha1password = sha1(password).hexdigest()
with db.trans() as c:
c.execute('''
INSERT INTO users
(username, password, is_admin)
VALUES
(%(username)s, %(sha1password)s, 'f')
''', locals())
return u'user %s created with password %s' % (username, password)
@get('/admin/force-new-password/:username')
@requires_auth
@requires_admin
def forceuserpassword(username):
password = str(int(random.random() * 999999))
sha1password = sha1(password).hexdigest()
if username == currentuser():
return 'cant change password for current user!'
with db.trans() as c:
c.execute('''
UPDATE users
SET password = %(sha1password)s
WHERE username = %(username)s
''', locals())
return u'user %s had password changed to: %s' % (username, password)
@get('/admin/change-user-admin-status/:username/:status')
@requires_auth
@requires_admin
def changeuseradminstatus(username, status):
if username == currentuser():
return 'cant change admin status for current user!'
if status not in ('0', '1'):
abort(400, "invalid status")
status = bool(int(status))
with db.trans() as c:
c.execute('''
UPDATE users
SET is_admin = %(status)s
WHERE username = %(username)s
''', locals())
return redirect('/admin')
@get('/login')
@view('login')
def login():
return dict(version=__VERSION__)
@post('/login')
def validatelogin():
user = request.forms.user
passwd = request.forms.passwd
if validateuserdb(user, passwd):
session_id = makesession(user)
response.set_cookie('clog', session_id)
return redirect('/')
else:
return 'invalid user or password'
@get('/logout')
def logout():
session_id = request.get_cookie('clog')
if session_id:
removesession(session_id)
response.delete_cookie('clog')
return redirect('/login')
@get('/change-password')
@view('change-password')
@requires_auth
def changepassword():
return dict(ctx=getctx())
@post('/change-password')
@requires_auth
def changepasswordsave():
oldpasswd = request.forms.oldpasswd
newpasswd = request.forms.newpasswd
newpasswd2 = request.forms.newpasswd2
username = currentuser()
if not validateuserdb(username, oldpasswd):
return 'invalid current password!'
if newpasswd.strip() == '' or newpasswd2.strip() == '':
return 'invalid new password!'
if newpasswd != newpasswd2:
return 'new passwords do not match!'
passwdsha1 = sha1(newpasswd).hexdigest()
with db.trans() as c:
c.execute('''
UPDATE users
SET password = %(passwdsha1)s
WHERE username = %(username)s
''', locals())
return redirect('/')
@route('/static/:filename')
def static(filename):
if not re.match(r'^[\w\d\-]+\.[\w\d\-]+$', filename):
abort(400, "invalid filename")
root = os.path.dirname(__file__)
return static_file('static/%s' % filename, root=root)
@get('/jobs/<computername>/<computeruser>/<script>/<id>')
@requires_auth
def joboutput(computername, computeruser, script, id):
if not re.match(r'^[a-f0-9-]{36}$', id):
raise ValueError('invalid id')
output = ''
with db.trans() as c:
c.execute('''
SELECT o.output
FROM jobhistory AS h
INNER JOIN jobs AS j
ON h.job_id = j.id
INNER JOIN outputs AS o
ON o.sha1 = h.output_sha1
WHERE j.computername = %(computername)s
AND j.computeruser = %(computeruser)s
AND j.script = %(script)s
AND h.id = %(id)s
''', locals())
r = c.fetchone()
if not r:
response.status = 404
return 'not found'
else:
response.content_type = 'text/plain; charset=utf-8'
return zlib.decompress(r['output'])
@get('/jobs/<computername>/<computeruser>/<script>/')
@view('history')
@requires_auth
def jobhistory(computername, computeruser, script):
ctx = getctx()
ctx['computername'] = computername
ctx['computeruser'] = computeruser
ctx['script'] = script
with db.trans() as c:
c.execute('''
SELECT h.id
, j.computername
, j.computeruser
, j.script
, h.datestarted
, h.datefinished
, h.status
, h.duration
FROM jobhistory AS h
INNER JOIN jobs AS j
ON j.id = h.job_id
WHERE j.computername = %(computername)s
AND j.computeruser = %(computeruser)s
AND j.script = %(script)s
ORDER BY j.computername
, j.computeruser
, j.script
, h.datestarted DESC
''', locals())
history = []
for hist in c:
h = dict(hist)
h['duration'] = duration_to_human(h['duration'])
history.append(h)
return dict(history=history, ctx=ctx)
@get('/history')
@view('historytable')
@requires_auth
def allhistory():
offset = 0
if 'offset' in request.query:
if re.match(r'^\d+$', request.query.offset):
offset = int(request.query.offset)*25
with db.trans() as c:
c.execute('''
SELECT h.id
, j.computername
, j.computeruser
, j.script
, h.datestarted
, h.datefinished
, h.status
, h.duration
FROM jobhistory AS h
INNER JOIN jobs AS j
ON j.id = h.job_id
ORDER BY h.datestarted DESC LIMIT 25 OFFSET %(offset)s
''', locals())
history = []
for hist in c:
h = dict(hist)
h['duration'] = duration_to_human(h['duration'])
history.append(h)
return dict(history=history, offset=offset)
@get('/config-job/<computername>/<computeruser>/<script>/')
@view('config-job')
@requires_auth
@requires_admin
def configjob(computername, computeruser, script):
ctx = getctx()
ctx['computername'] = computername
ctx['computeruser'] = computeruser
ctx['script'] = script
daystokeep = 30
with db.trans() as c:
c.execute('''
SELECT c.daystokeep
FROM jobconfig AS c
INNER JOIN jobs AS j
ON j.id = c.job_id
WHERE j.computername = %(computername)s
AND j.computeruser = %(computeruser)s
AND j.script = %(script)s
''', locals())
r = c.fetchone()
if r:
daystokeep = r['daystokeep']
c.execute('''
SELECT a.email
FROM jobconfigalert AS a
INNER JOIN jobs AS j
ON j.id = a.job_id
WHERE j.computername = %(computername)s
AND j.computeruser = %(computeruser)s
AND j.script = %(script)s
''', locals())
emails = []
for r in c:
emails.append(r['email'])
return dict(ctx=ctx, daystokeep=daystokeep, emails="\n".join(emails))
@post('/purge-job/<computername>/<computeruser>/<script>/')
@requires_auth
@requires_admin
def purgejob(computername, computeruser, script):
job_id = get_job_id(computername, computeruser, script)
with db.trans() as c:
c.execute('''
DELETE FROM jobhistory
WHERE job_id = %(job_id)s
''', locals())
c.execute('''
DELETE FROM jobconfig
WHERE job_id = %(job_id)s
''', locals())
c.execute('''
DELETE FROM jobconfigalert
WHERE job_id = %(job_id)s
''', locals())
c.execute('''
DELETE FROM jobs
WHERE id = %(job_id)s
''', locals())
return redirect('/')
@post('/save-daystokeep/<computername>/<computeruser>/<script>/')
@requires_auth
@requires_admin
def savedaystokeep(computername, computeruser, script):
daystokeep = request.forms.daystokeep
if not re.match(r'^\d+$', daystokeep):
abort(400, "invalid days to keep")
daystokeep = int(daystokeep)
if daystokeep < 0:
return 'days to keep must be >= 0'
job_id = get_job_id(computername, computeruser, script)
with db.trans() as c:
c.execute('''
UPDATE jobconfig
SET daystokeep = %(daystokeep)s
WHERE job_id = %(job_id)s
''', locals())
if c.rowcount == 0:
c.execute('''
INSERT INTO jobconfig
(job_id, daystokeep)
VALUES
(%(job_id)s, %(daystokeep)s)
''', locals())
return redirect('/config-job/' + computername + '/' +
computeruser + '/' + script + '/')
@post('/save-alertemails/<computername>/<computeruser>/<script>/')
@requires_auth
@requires_admin
def savealertemails(computername, computeruser, script):
job_id = get_job_id(computername, computeruser, script)
with db.trans() as c:
c.execute('''
DELETE FROM jobconfigalert
WHERE job_id = %(job_id)s
''', locals())
for email in request.forms.emails.split():
c.execute('''
INSERT INTO jobconfigalert
VALUES
(%(job_id)s, %(email)s)
''', locals())
return redirect('/config-job/' + computername + '/' +
computeruser + '/' + script + '/')
@get('/')
@view('jobs')
@requires_auth
def index():
return dict(ctx=getctx())
@get('/jobs')
@view('jobstable')
@requires_auth
def jobs():
with db.trans() as c:
c.execute('''
SELECT computername
, computeruser
, script
, date_last_success
, date_last_failure
, last_status
, last_duration
FROM jobs
ORDER BY computername
, computeruser
, script
''')
jobs = []
for job in c:
j = dict(job)
j['date_last_success'] = date_to_human(j['date_last_success'])
j['date_last_failure'] = date_to_human(j['date_last_failure'])
j['last_duration'] = duration_to_human(j['last_duration'])
jobs.append(j)
return dict(jobs=jobs)
@post('/')
def newjob():
rid_regexp = r'^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$'
rid = request.forms.id
if not re.match(rid_regexp, rid):
abort(400, "invalid job id")
start_time = request.forms.start_time
if not re.match(r'^[\d+\.]+$', start_time):
abort(400, "invalid start time")
start_time = datetime.datetime.fromtimestamp(int(float(start_time)))
end_time = request.forms.end_time
if not re.match(r'^[\d+\.]+$', end_time):
abort(400, "invalid end time")
end_time = datetime.datetime.fromtimestamp(int(float(end_time)))
duration = request.forms.duration
if not re.match(r'^[0-9\.]+$', duration):
abort(400, "invalid duration")
status = request.forms.status
if status not in ('fail', 'ok'):
abort(400, "invalid status")
script = request.forms.script
if not re.match(r'^[a-zA-Z0-9\-_\.]+$', script):
abort(400, "invalid script name")
output = request.forms.output or ''
output = b64decode(output)
outputz = buffer(zlib.compress(output))
computername = request.forms.computername
computeruser = request.forms.username
# Windows
if '\\' in computeruser:
computeruser = computeruser.split('\\')[-1]
ip = request.remote_addr
log.info('new job status from %s@%s/%s (%s)', computeruser, computername,
script, ip)
log.info(' id: %s', rid)
output_sha1 = sha1(output).hexdigest()
with db.trans() as c:
try:
c.execute('''
INSERT INTO outputs
(sha1, output)
VALUES
(%(output_sha1)s, %(outputz)s)
''', locals())
except db.psycopg2.IntegrityError:
pass
job_id = get_job_id(computername, computeruser, script)
if not job_id:
with db.trans() as c:
c.execute('''
INSERT INTO jobs (
computername, computeruser, script,
last_status, last_duration
)
VALUES (
%(computername)s, %(computeruser)s, %(script)s,
%(status)s, %(duration)s
)
RETURNING id
''', locals())
job_id = c.fetchone()[0]
try:
with db.trans() as c:
c.execute('''
INSERT INTO jobhistory (
id, job_id, ip, datestarted, datefinished,
status, duration, output_sha1
)
VALUES (
%(rid)s, %(job_id)s, %(ip)s, %(start_time)s,
%(end_time)s, %(status)s, %(duration)s, %(output_sha1)s
)
''', locals())
if status == 'ok':
c.execute('''
UPDATE jobs
SET date_last_success = %(start_time)s
, last_status = 'ok'
, last_duration = %(duration)s
WHERE id = %(job_id)s
''', locals())
else:
c.execute('''
UPDATE jobs
SET date_last_failure = %(start_time)s
, last_status = 'fail'
, last_duration = %(duration)s
WHERE id = %(job_id)s
''', locals())
except db.psycopg2.IntegrityError:
# Ignoring duplicate insertion.
return 'ok'
else:
emails = getalertemails(computername, computeruser, script)
if emails:
if status == 'fail':
for email in emails:
log.info(" job failed, sending alert to %s", email)
send_alert(
email, computername, computeruser, script,
status, output)
elif status == 'ok':
with db.trans() as c:
c.execute('''
SELECT status
FROM jobhistory
WHERE job_id = %(job_id)s
ORDER BY datestarted DESC LIMIT 1 OFFSET 1
''', locals())
r = c.fetchone()
if r and r['status'] == 'fail':
for email in emails:
log.info(" job ok, sending alert to %s", email)
send_alert(
email, computername, computeruser, script,
status, output
)
return 'ok'
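# Illustrative only (not part of the original clog code base): the form fields that
# newjob() above expects from a reporting client. The URL, the values and the use of
# the `requests` library are placeholders/assumptions for demonstration.
def _example_client_report(base_url):
    import base64
    import time
    import uuid
    import requests
    requests.post(base_url + '/', data={
        'id': str(uuid.uuid4()),
        'start_time': str(time.time() - 12.5),
        'end_time': str(time.time()),
        'duration': '12.5',
        'status': 'ok',                      # or 'fail'
        'script': 'backup-db.sh',
        'computername': 'host01',
        'username': 'backup',
        'output': base64.b64encode('job output here'),
    })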
# Get notification e-mails for a job.
def getalertemails(computername, computeruser, script):
job_id = get_job_id(computername, computeruser, script)
with db.trans() as c:
c.execute('''
SELECT email
FROM jobconfigalert
WHERE job_id = %(job_id)s
''', locals())
emails = []
for row in c:
emails.append(row['email'])
return emails
# Delete login sessions older than 7 days
def purge_sessions():
with db.trans() as c:
c.execute('''
DELETE FROM sessions
WHERE date(now()) - date(date_login) > 7
''')
if c.rowcount > 0:
log.info('purged %s login sessions', c.rowcount)
# Delete old entries on jobhistory from database.
def purge_jobhistory():
with db.trans() as c:
c.execute('''
SELECT id
FROM jobs
''')
for job in c:
job_id = job['id']
with db.trans() as c2:
c2.execute('''
SELECT daystokeep
FROM jobconfig
WHERE job_id = %(job_id)s
''', locals())
daystokeep = 30
r = c2.fetchone()
if r:
daystokeep = r['daystokeep']
with db.trans() as c3:
c3.execute('''
DELETE FROM jobhistory
WHERE date(now()) - date(datestarted) > %(daystokeep)s
AND job_id = %(job_id)s
''', locals())
if c3.rowcount > 0:
log.debug("purged %s entries for jobhistory",
c3.rowcount)
# Delete unreferenced entries from outputs.
def purge_outputs():
with db.trans() as c:
c.execute('''
DELETE FROM outputs
WHERE sha1 NOT IN (
SELECT DISTINCT output_sha1 FROM jobhistory
)
''')
if c.rowcount > 0:
log.debug("purged %s entries for outputs", c.rowcount)
def send_alert(email, computername, computeruser, script, status, output):
subject = ''
body = ''
if status == 'fail':
subject = 'clog: job {} failed for {}@{}'.format(
script,
computeruser,
computername
)
body = output
elif status == 'ok':
subject = 'clog: job {} back to normal for {}@{}'.format(
script,
computeruser,
computername
)
body += '\n\nThis is an automatic notification sent by ' + \
'clog (https://github.com/zanardo/clog)'
s = subprocess.Popen(['mail', '-s', subject, email], stdin=subprocess.PIPE)
s.communicate(body)
# Purge expired data.
@get('/cleanup')
def cleanup():
log.info('starting maintenance')
purge_jobhistory()
purge_outputs()
purge_sessions()
log.info('finishing maintenance')
app = bottle.default_app()
# Reading configuration file.
if 'CLOGD_CONF' in os.environ:
log.info('reading configuration from %s', os.environ['CLOGD_CONF'])
with open(os.environ['CLOGD_CONF'], 'r') as fp:
conf = yaml.load(fp)
for k in CONFIG:
if k in conf:
CONFIG[k] = conf[k]
db.config_connection(
CONFIG['pg_host'], CONFIG['pg_port'], CONFIG['pg_user'],
CONFIG['pg_pass'], CONFIG['pg_db'])
| 29.414514 | 82 | 0.535335 |
daeb51607ca51cd3294dcd49ec9a3ef44d85c7ca | 286 | py | Python | app/user/urls.py | royandri/recipe-app-api | 5eb7fd433946f6c25fb84d063a46173ee595adf5 | [
"MIT"
] | null | null | null | app/user/urls.py | royandri/recipe-app-api | 5eb7fd433946f6c25fb84d063a46173ee595adf5 | [
"MIT"
] | null | null | null | app/user/urls.py | royandri/recipe-app-api | 5eb7fd433946f6c25fb84d063a46173ee595adf5 | [
"MIT"
] | null | null | null | from django.urls import path
from user import views
app_name = 'user'
urlpatterns = [
path('create/', views.CreateUserView.as_view(), name='create'),
path('token/', views .CreateTokenView.as_view(), name='token'),
path('me/', views.ManageUserView.as_view(), name='me'),
]
| 26 | 67 | 0.681818 |
bfe4e48778b2838d2ed8649e416de44a13497c04 | 1,326 | py | Python | transaction-optimizer/UtxoService.py | akvelon/Bitcoin-Transaction-Optimization | e3740fe37869a0b84a472b19dbc5d879ec857837 | [
"Apache-2.0"
] | 5 | 2019-03-21T12:57:56.000Z | 2019-09-15T00:00:35.000Z | transaction-optimizer/UtxoService.py | akvelon/Bitcoin-Transaction-Optimization | e3740fe37869a0b84a472b19dbc5d879ec857837 | [
"Apache-2.0"
] | 25 | 2019-04-01T09:23:10.000Z | 2022-02-10T00:10:23.000Z | transaction-optimizer/UtxoService.py | akvelon/Bitcoin-Transaction-Optimization | e3740fe37869a0b84a472b19dbc5d879ec857837 | [
"Apache-2.0"
] | 1 | 2019-03-15T13:54:51.000Z | 2019-03-15T13:54:51.000Z | """
Copyright 2019 Akvelon Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
class UtxoService:
"monitors Unspent Transaction Outputs"
def __init__(self, bitcoinService):
self._bitcoinService = bitcoinService
self._spentOutputs = []
def list_unspent(self):
def is_unspent(output):
return not any(spent['txid'] == output['txid'] and spent['vout'] == output['vout']
for spent in self._spentOutputs)
def convert_amount_to_satoshi(output):
output = dict(output)
output['amount'] = int(100_000_000 * output['amount'])
return output
utxo = self._bitcoinService.list_unspent()
utxo = map(convert_amount_to_satoshi, utxo)
return list(filter(is_unspent, utxo))
def register_spent(self, outputs):
self._spentOutputs.extend(outputs)
| 40.181818 | 307 | 0.711916 |
2388b8156f4f24ed22f4fa58ed595d205d0bcbe1 | 11,032 | py | Python | napalm_yang/models/openconfig/network_instances/network_instance/fdb/mac_table/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 64 | 2016-10-20T15:47:18.000Z | 2021-11-11T11:57:32.000Z | napalm_yang/models/openconfig/network_instances/network_instance/fdb/mac_table/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 126 | 2016-10-05T10:36:14.000Z | 2019-05-15T08:43:23.000Z | napalm_yang/models/openconfig/network_instances/network_instance/fdb/mac_table/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 63 | 2016-11-07T15:23:08.000Z | 2021-09-22T14:41:16.000Z | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import entries
class mac_table(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/fdb/mac-table. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Table of learned or statically configured MAC addresses and
corresponding VLANs in the bridge domain
"""
__slots__ = ("_path_helper", "_extmethods", "__entries")
_yang_name = "mac-table"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__entries = YANGDynClass(
base=entries.entries,
is_container="container",
yang_name="entries",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["network-instances", "network-instance", "fdb", "mac-table"]
def _get_entries(self):
"""
Getter method for entries, mapped from YANG variable /network_instances/network_instance/fdb/mac_table/entries (container)
YANG Description: Enclosing container for list of MAC table entries
"""
return self.__entries
def _set_entries(self, v, load=False):
"""
Setter method for entries, mapped from YANG variable /network_instances/network_instance/fdb/mac_table/entries (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_entries is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_entries() directly.
YANG Description: Enclosing container for list of MAC table entries
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=entries.entries,
is_container="container",
yang_name="entries",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """entries must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=entries.entries, is_container='container', yang_name="entries", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__entries = t
if hasattr(self, "_set"):
self._set()
def _unset_entries(self):
self.__entries = YANGDynClass(
base=entries.entries,
is_container="container",
yang_name="entries",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
entries = __builtin__.property(_get_entries, _set_entries)
_pyangbind_elements = OrderedDict([("entries", entries)])
from . import entries
class mac_table(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/fdb/mac-table. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Table of learned or statically configured MAC addresses and
corresponding VLANs in the bridge domain
"""
__slots__ = ("_path_helper", "_extmethods", "__entries")
_yang_name = "mac-table"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__entries = YANGDynClass(
base=entries.entries,
is_container="container",
yang_name="entries",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["network-instances", "network-instance", "fdb", "mac-table"]
def _get_entries(self):
"""
Getter method for entries, mapped from YANG variable /network_instances/network_instance/fdb/mac_table/entries (container)
YANG Description: Enclosing container for list of MAC table entries
"""
return self.__entries
def _set_entries(self, v, load=False):
"""
Setter method for entries, mapped from YANG variable /network_instances/network_instance/fdb/mac_table/entries (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_entries is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_entries() directly.
YANG Description: Enclosing container for list of MAC table entries
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=entries.entries,
is_container="container",
yang_name="entries",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """entries must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=entries.entries, is_container='container', yang_name="entries", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__entries = t
if hasattr(self, "_set"):
self._set()
def _unset_entries(self):
self.__entries = YANGDynClass(
base=entries.entries,
is_container="container",
yang_name="entries",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
entries = __builtin__.property(_get_entries, _set_entries)
_pyangbind_elements = OrderedDict([("entries", entries)])
| 37.52381 | 380 | 0.611675 |
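# Editor's hedged usage sketch for the generated binding above (not part of the
# original dataset row). It assumes this code runs inside the generated pyangbind
# package, so the module-level "from . import entries" resolves; the printed
# values follow directly from the class definition shown above.
mt = mac_table()
print(mt._yang_name)                 # "mac-table"
print(mt._path())                    # ['network-instances', 'network-instance', 'fdb', 'mac-table']
print(list(mt._pyangbind_elements))  # ['entries']
container = mt.entries               # YANG container wrapping the MAC table entry list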
931cb83ffaf71d5a8ee80d14fa9121c2fceb7bf6 | 1,770 | py | Python | facepalm.py | dropbox/pep8squad | b4260bd268d8fdfd0ee1aecb6e12bd90b9ea7a66 | [
"Apache-2.0"
] | 15 | 2015-04-08T18:12:06.000Z | 2021-07-13T10:49:07.000Z | facepalm.py | dropbox/pep8squad | b4260bd268d8fdfd0ee1aecb6e12bd90b9ea7a66 | [
"Apache-2.0"
] | 2 | 2015-04-09T19:44:44.000Z | 2015-04-10T16:55:04.000Z | facepalm.py | dropbox/pep8squad | b4260bd268d8fdfd0ee1aecb6e12bd90b9ea7a66 | [
"Apache-2.0"
] | 6 | 2015-04-08T18:12:08.000Z | 2020-11-18T18:23:57.000Z | print """Something went wrong, probably because of you. Was the Python file actually valid?
............................................________
....................................,.-'"...................``~.,
.............................,.-"..................................."-.,
.........................,/...............................................":,
.....................,?......................................................,
.................../...........................................................,}
................./......................................................,:`^`..}
.............../...................................................,:"........./
..............?.....__.........................................:`.........../
............./__.(....."~-,_..............................,:`........../
.........../(_...."~,_........"~,_....................,:`........_/
..........{.._$;_......"=,_......."-,_.......,.-~-,},.~";/....}
...........((.....*~_......."=-._......";,,./`..../"............../
...,,,___.`~,......"~.,....................`.....}............../
............(....`=-,,.......`........................(......;_,,-"
............/.`~,......`-...................................../
.............`~.*-,.....................................|,./.....,__
,,_..........}.>-._...................................|..............`=~-,
.....`=~-,__......`,.................................
...................`=~-,,.,...............................
................................`:,,...........................`..............__
.....................................`=-,...................,%`>--==``
........................................_..........._,-%.......`
...................................,""" | 70.8 | 91 | 0.061582 |
cb53ec57dd8fb2666b8772f8704baa97b18dc2b9 | 146 | py | Python | my_lambdata/ds_utilities.py | charlie-may86/Lambdata-DSPT6 | 7ae3807e5b4daf4c0264660febc92e5bd0fc066f | [
"MIT"
] | null | null | null | my_lambdata/ds_utilities.py | charlie-may86/Lambdata-DSPT6 | 7ae3807e5b4daf4c0264660febc92e5bd0fc066f | [
"MIT"
] | null | null | null | my_lambdata/ds_utilities.py | charlie-may86/Lambdata-DSPT6 | 7ae3807e5b4daf4c0264660febc92e5bd0fc066f | [
"MIT"
] | null | null | null | def enlarge(n):
'''This function will multiply the input by 100'''
return n*100
y = int(input('Choose a number: '))
print(y, enlarge(y)) | 20.857143 | 54 | 0.643836 |
b6a51a7c9b599899608c1b4a2f1f1025a7124970 | 1,353 | py | Python | arc109/a.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | 1 | 2021-03-09T04:28:13.000Z | 2021-03-09T04:28:13.000Z | arc109/a.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | null | null | null | arc109/a.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | null | null | null | # included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(a, b, x, y):
ret = x
if b < a:
a -= 1
if 2 * x < y:
up = 2 * x
else:
up = y
ret += abs(a - b) * up
return ret
def main():
# parse input
a, b, x, y = map(int, input().split())
print(solve(a, b, x, y))
# tests
T1 = """
2 1 1 5
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
1
"""
T2 = """
1 2 100 1
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
101
"""
T3 = """
1 100 1 100
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
199
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
| 15.202247 | 59 | 0.512195 |
b75b10a21abb27e9f4e11d286781892449e3859c | 705 | py | Python | base/views.py | stenwire/todo_list-app | 1ec6f0d738f89be27c5e66d102c9b4b513515e2e | [
"MIT"
] | null | null | null | base/views.py | stenwire/todo_list-app | 1ec6f0d738f89be27c5e66d102c9b4b513515e2e | [
"MIT"
] | null | null | null | base/views.py | stenwire/todo_list-app | 1ec6f0d738f89be27c5e66d102c9b4b513515e2e | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.urls import reverse_lazy
from .models import Task
# Create your views here.
class TaskList(ListView):
model = Task
context_object_name = 'tasks'
class TaskDetail(DetailView):
model = Task
context_object_name = 'task'
template_name = 'base/task.html'
class TaskCreate(CreateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks')
class TaskUpdate(UpdateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks') | 22.741935 | 60 | 0.743262 |
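# Editor's hedged companion sketch: the class-based views above rely on a URL
# name 'tasks' (via reverse_lazy) and a template at base/task.html. A minimal
# base/urls.py wiring them up could look like this; the route strings and the
# names other than 'tasks' are assumptions, not taken from the original project.
from django.urls import path
from .views import TaskList, TaskDetail, TaskCreate, TaskUpdate

urlpatterns = [
    path('', TaskList.as_view(), name='tasks'),                # name used by reverse_lazy('tasks')
    path('task/<int:pk>/', TaskDetail.as_view(), name='task'),
    path('task-create/', TaskCreate.as_view(), name='task-create'),
    path('task-update/<int:pk>/', TaskUpdate.as_view(), name='task-update'),
]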
2ad2e45e787e211b2351fcb8749b7bdbcda8b9ba | 2,405 | py | Python | Modules/Embeddings/HotEncodingEmbedding.py | PierreOreistein/MVA-Kernels | 51759ba72e5f1ad8bd3fdaa08dbf8b1e05ed1a7e | [
"MIT"
] | null | null | null | Modules/Embeddings/HotEncodingEmbedding.py | PierreOreistein/MVA-Kernels | 51759ba72e5f1ad8bd3fdaa08dbf8b1e05ed1a7e | [
"MIT"
] | null | null | null | Modules/Embeddings/HotEncodingEmbedding.py | PierreOreistein/MVA-Kernels | 51759ba72e5f1ad8bd3fdaa08dbf8b1e05ed1a7e | [
"MIT"
] | null | null | null | import itertools
import pandas as pd
import numpy as np
from numba import jit
from tqdm import tqdm
def HotEncodingEmbedding(X, d=[5, 6, 7]):
"""Implementation of the HotEncoding Embedding with DimismatchEmbedding."""
# Shape
n = np.shape(X)[0]
# Convert X as a DataFrame to deal with numba
X_df = pd.DataFrame(X, columns=["seq"])
X_df["ascii"] = X_df["seq"].apply(lambda x: list(x))
X_converted = X_df["ascii"].apply(lambda x: [ord(l) for l in x]).values
X_converted = np.array(X_converted.tolist(), dtype=np.int8)
# Letters
letters = ["A", "C", "G", "T"]
letters_converted = [ord(l) for l in letters]
# Resulting array
new_X = None
# Loop over the dimensions of the sequences
for k in tqdm(d):
# Cartesian product
lists = [letters_converted] * k
k_mer_l = np.array([elt for elt in itertools.product(*lists)])
# Shape
d_k = len(k_mer_l)
# Resulting array
        new_X_k = np.zeros((n, len(X[0]) - k + 1, d_k), dtype=np.bool_)  # a sequence of length L has L - k + 1 k-mer windows
@jit(nopython=True, parallel=True)
def subHotEncodingEmbedding(new_X_k, X_conv=X_converted, k_mer=k_mer_l):
"""Compute the DimismacthEmbedding on for this list of k_mer."""
# Computation of the embedding of X
for i in range(n):
# Extract the sequence i
x_i = X_conv[i]
len_i = len(x_i)
for l in range(len_i - k + 1):
# Extract x_il
x_il = x_i[l:(l + k)]
# Extract indices
for j in range(d_k):
# Extract k_mer_j
k_mer_j = k_mer[j]
# Computation of gamma_k_m
matchs = 0
for p in range(k):
matchs += int(x_il[p] == k_mer_j[p])
if matchs >= k:
new_X_k[i, l, j] = 1
return new_X_k
# Compute the embedding for the given list of k_mer_l
new_X_k = subHotEncodingEmbedding(new_X_k)
# Update new_X
if new_X is None:
new_X = new_X_k.reshape((n, -1))
else:
new_X = np.hstack((new_X, new_X_k.reshape((n, -1))))
new_X = new_X.astype(np.bool_)
# Return data
return new_X
| 27.965116 | 80 | 0.525988 |
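# Editor's hedged usage sketch for HotEncodingEmbedding above: it expects an
# array of equal-length A/C/G/T strings. The sequences and the small k-mer
# sizes below are made up so the example runs quickly.
import numpy as np

X_demo = np.array(["ACGTACGTAC", "TTGCACGATG", "CCATGGTACA"])
embedding = HotEncodingEmbedding(X_demo, d=[2, 3])
print(embedding.shape)  # (3, n_features): one indicator per (window position, k-mer) pair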
42842b0e1b6f0a7c3478f8c5dfe18687c96b9c59 | 3,887 | py | Python | run_pancancer_analysis.py | martynaut/pancancer_mirnome | 4cfc3794afd590958493cb9af45c099868dd9cca | [
"MIT"
] | null | null | null | run_pancancer_analysis.py | martynaut/pancancer_mirnome | 4cfc3794afd590958493cb9af45c099868dd9cca | [
"MIT"
] | null | null | null | run_pancancer_analysis.py | martynaut/pancancer_mirnome | 4cfc3794afd590958493cb9af45c099868dd9cca | [
"MIT"
] | null | null | null | import pandas as pd
import click
import os
@click.command()
@click.argument('input_folder')
@click.argument('output_folder')
def main(input_folder, output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
files = [[(x[0] + '/' + y, x[0].split('/')[-1].replace('DATA_', '').replace('_22', '')) for y in x[2]] for x in
os.walk(input_folder)
if '_22' in x[0].split('/')[-1]]
flat_files = [file for sublist in files for file in sublist]
csv_files = [file for file in flat_files if file[0].endswith('csv') and (
'pancancer' not in file[1])]
    df_files_summary = pd.DataFrame()
for summary_file, folder_name in [
file for file in csv_files if file[0].endswith('files_summary_count_per_patient.csv')
]:
df_temp = pd.read_csv(summary_file)
df_temp = df_temp.groupby('indiv_name').count()
row = pd.DataFrame([[folder_name, df_temp.shape[0]]], columns=['folder', 'number_of_patients'])
        df_files_summary = pd.concat([df_files_summary, row], sort=False)
if output_folder.endswith('/'):
        df_files_summary.to_excel(output_folder + 'number_of_patients.xlsx', index=False)
else:
        df_files_summary.to_excel(output_folder + '/number_of_patients.xlsx', index=False)
df_count_summary = pd.DataFrame()
for count_file, folder_name in [
file for file in csv_files if file[0].endswith('patient_mutation_count.csv')
]:
df_temp = pd.read_csv(count_file)
df_temp['cancer_id'] = folder_name
df_count_summary = pd.concat([df_count_summary, df_temp], sort=False)
if output_folder.endswith('/'):
df_count_summary.to_excel(output_folder + 'patient_mutation_count.xlsx', index=False)
else:
df_count_summary.to_excel(output_folder + '/patient_mutation_count.xlsx', index=False)
all_mutation_count = pd.DataFrame()
for count_file, folder_name in [
file for file in csv_files if file[0].endswith('patient_mutation_count.csv')
]:
df_temp = pd.read_csv(count_file)
df_temp['cancer'] = folder_name
all_mutation_count = pd.concat([all_mutation_count, df_temp], sort=False)
if output_folder.endswith('/'):
all_mutation_count.to_excel(output_folder + 'number_of_mutations_all_patients.xlsx', index=False)
else:
all_mutation_count.to_excel(output_folder + '/number_of_mutations_all_patients.xlsx', index=False)
df_all_mutation = pd.DataFrame()
for all_mutations_filtered_mut_type_gene, folder_name in [
file for file in csv_files if file[0].endswith('all_mutations.csv')
]:
print(folder_name)
df_temp = pd.read_csv(all_mutations_filtered_mut_type_gene)
df_temp['cancer'] = folder_name
df_all_mutation = pd.concat([df_all_mutation, df_temp], sort=False)
df_all_mutation.to_csv(output_folder + 'all_mutations.csv')
df_complex = pd.DataFrame()
for complex_file, folder_name in [
file for file in csv_files if file[0].endswith('complex.csv')
]:
df_temp = pd.read_csv(complex_file)
df_temp['cancer'] = folder_name
df_complex = pd.concat([df_complex, df_temp], sort=False)
df_complex.to_csv(output_folder + 'complex.csv')
df_occur = pd.DataFrame()
for occur_file, folder_name in [
file for file in csv_files if file[0].endswith('occur.csv')
]:
df_temp = pd.read_csv(occur_file)
df_occur = pd.concat([df_occur, df_temp], sort=False)
df_occur = df_occur.groupby(['chrom', 'pre_name', 'id', 'start_pre', 'seq_type'], as_index=False).agg({
'indiv_name_nunique': sum,
'indiv_name_count': sum,
'pos_nunique': sum,
'if_complex': lambda x: 1 if sum(x) > 0 else 0
})
df_occur.to_csv(output_folder + 'occur.csv')
if __name__ == "__main__":
main()
| 41.351064 | 115 | 0.673013 |
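# Editor's hedged usage note for the click command above. From a shell it is
# driven as:  python run_pancancer_analysis.py <input_folder> <output_folder>
# The programmatic invocation below uses click's test runner; the folder paths
# are placeholders, and the input folder is expected to contain per-cancer
# "*_22" subfolders with the CSV summaries referenced in main().
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(main, ["./DATA", "./pancancer_output/"])
print(result.exit_code)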
67bd1f8754181b55ee92fe777411ff7bfad11ff5 | 393 | py | Python | server/config.py | dcalacci/weclocked | 27a8175d3d7d6d581714849df67fe0a8e6f7636e | [
"MIT"
] | null | null | null | server/config.py | dcalacci/weclocked | 27a8175d3d7d6d581714849df67fe0a8e6f7636e | [
"MIT"
] | null | null | null | server/config.py | dcalacci/weclocked | 27a8175d3d7d6d581714849df67fe0a8e6f7636e | [
"MIT"
] | null | null | null | class Config(object):
DEBUG = True
DEVELOPMENT = True
SECRET_KEY = 'do-i-really-need-this'
FLASK_SECRET = SECRET_KEY
TESTING = False
class ProductionConfig(Config):
DEVELOPMENT = False
DEBUG = False
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
DEVELOPMENT = True
DEBUG = True
TESTING = True
| 19.65 | 40 | 0.676845 |
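# Editor's hedged usage sketch: one common way these config classes are consumed
# is through a Flask application factory. The factory itself is an assumption;
# only the config classes come from the module above.
from flask import Flask


def create_app(config_object=DevelopmentConfig):
    app = Flask(__name__)
    app.config.from_object(config_object)  # picks up upper-case attributes such as SECRET_KEY and DEBUG
    return app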
782d14a2091c04e2f78f8ef32e03e70284621a6d | 13,606 | py | Python | mmtbx/command_line/model_vs_map.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | mmtbx/command_line/model_vs_map.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/command_line/model_vs_map.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.model_vs_map
from scitbx.array_family import flex
import iotbx.pdb
from libtbx.utils import Sorry
from libtbx.str_utils import make_sub_header
from cctbx import maptbx
from cctbx import miller
import mmtbx.utils
import mmtbx.maps.correlation
import mmtbx.model.statistics
import mmtbx.model
import sys, time
legend = """phenix.development.model_map_statistics:
Given PDB file and a map compute various statistics.
How to run:
phenix.development.model_map_statistics model.pdb map.ccp4 resolution=3
Feedback:
PAfonine@lbl.gov"""
master_params_str = """
map_file_name = None
.type = str
model_file_name = None
.type = str
resolution = None
.type = float
scattering_table = wk1995 it1992 *n_gaussian neutron electron
.type = choice
"""
def master_params():
return iotbx.phil.parse(master_params_str, process_includes=False)
def broadcast(m, log):
print("-"*79, file=log)
print(m, file=log)
print("*"*len(m), file=log)
def show_histogram(data=None, n_slots=None, data_min=None, data_max=None,
log=None):
from cctbx.array_family import flex
hm = flex.histogram(data = data, n_slots = n_slots, data_min = data_min,
data_max = data_max)
lc_1 = hm.data_min()
s_1 = enumerate(hm.slots())
for (i_1,n_1) in s_1:
hc_1 = hm.data_min() + hm.slot_width() * (i_1+1)
print("%10.4f - %-10.4f : %d" % (lc_1, hc_1, n_1), file=log)
lc_1 = hc_1
def run(args, log=sys.stdout):
print("-"*79, file=log)
print(legend, file=log)
print("-"*79, file=log)
inputs = mmtbx.utils.process_command_line_args(args = args,
master_params = master_params())
params = inputs.params.extract()
# estimate resolution
d_min = params.resolution
broadcast(m="Map resolution:", log=log)
if(d_min is None):
raise Sorry("Resolution is required.")
print(" d_min: %6.4f"%d_min, file=log)
# model
broadcast(m="Input PDB:", log=log)
file_names = inputs.pdb_file_names
if(len(file_names) != 1): raise Sorry("PDB file has to given.")
if(inputs.crystal_symmetry is None):
raise Sorry("No crystal symmetry defined.")
pdb_inp = iotbx.pdb.input(file_name=file_names[0])
model = mmtbx.model.manager(
model_input = pdb_inp,
crystal_symmetry=inputs.crystal_symmetry,
build_grm=True)
if model.get_number_of_models() > 1:
raise Sorry("Only one model allowed.")
model.setup_scattering_dictionaries(scattering_table=params.scattering_table)
model.get_xray_structure().show_summary(f=log, prefix=" ")
broadcast(m="Input map:", log=log)
if(inputs.ccp4_map is None): raise Sorry("Map file has to given.")
inputs.ccp4_map.show_summary(prefix=" ")
map_data = inputs.ccp4_map.map_data()
print(" Actual map (min,max,mean):", \
map_data.as_1d().min_max_mean().as_tuple(), file=log)
make_sub_header("Histogram of map values", out=log)
md = map_data.as_1d()
show_histogram(data=md, n_slots=10, data_min=flex.min(md),
data_max=flex.max(md), log=log)
# shift origin if needed
soin = maptbx.shift_origin_if_needed(map_data=map_data,
sites_cart=model.get_sites_cart(), crystal_symmetry=model.crystal_symmetry())
map_data = soin.map_data
model.set_sites_cart(soin.sites_cart, update_grm=True)
####
# Compute and show all stats
####
broadcast(m="Model statistics:", log=log)
make_sub_header("Overall", out=log)
info = mmtbx.model.statistics.info(model=model)
info.geometry.show()
# XXX - these are not available anymore due to refactoring
# make_sub_header("Histogram of devations from ideal bonds", out=log)
# show_histogram(data=ms.bond_deltas, n_slots=10, data_min=0, data_max=0.2,
# log=log)
# #
# make_sub_header("Histogram of devations from ideal angles", out=log)
# show_histogram(data=ms.angle_deltas, n_slots=10, data_min=0, data_max=30.,
# log=log)
# #
# make_sub_header("Histogram of non-bonded distances", out=log)
# show_histogram(data=ms.nonbonded_distances, n_slots=10, data_min=0,
# data_max=5., log=log)
#
make_sub_header("Histogram of ADPs", out=log)
info.adp.show(log=log)
# bs = xrs.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.)
# show_histogram(data=bs, n_slots=10, data_min=flex.min(bs),
# data_max=flex.max(bs), log=log)
#
# Compute CC
broadcast(m="Map-model CC (overall):", log=log)
five_cc_result = mmtbx.maps.correlation.five_cc(map = map_data,
xray_structure = model.get_xray_structure(), d_min = d_min)
atom_radius = five_cc_result.atom_radius
if atom_radius is None:
atom_radius = five_cc_result._atom_radius()
print(" CC_mask : %6.4f"%five_cc_result.result.cc_mask, file=log)
print(" CC_volume: %6.4f"%five_cc_result.result.cc_volume, file=log)
print(" CC_peaks : %6.4f"%five_cc_result.result.cc_peaks, file=log)
# Compute FSC(map, model)
broadcast(m="Model-map FSC:", log=log)
fsc = mmtbx.maps.correlation.fsc_model_vs_map(
xray_structure = model.get_xray_structure(),
map = map_data,
atom_radius = atom_radius,
d_min = d_min)
fsc.show(prefix=" ")
# Local CC
cc_calculator = mmtbx.maps.correlation.from_map_and_xray_structure_or_fmodel(
xray_structure = model.get_xray_structure(),
map_data = map_data,
d_min = d_min)
broadcast(m="Map-model CC (local):", log=log)
# per residue
print("Per residue:", file=log)
residue_results = list()
ph = model.get_hierarchy()
xrs = model.get_xray_structure()
for rg in ph.residue_groups():
cc = cc_calculator.cc(selection=rg.atoms().extract_i_seq())
chain_id = rg.parent().id
print(" chain id: %s resid %s: %6.4f"%(
chain_id, rg.resid(), cc), file=log)
# per chain
print("Per chain:", file=log)
for chain in ph.chains():
print(" chain %s: %6.4f"%(chain.id, cc_calculator.cc(
selection=chain.atoms().extract_i_seq())), file=log)
# per residue detailed counts
print("Per residue (histogram):", file=log)
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs.unit_cell(),
space_group_info = xrs.space_group_info(),
pre_determined_n_real = map_data.accessor().all())
f_calc = xrs.structure_factors(d_min=d_min).f_calc()
fft_map = miller.fft_map(
crystal_gridding = crystal_gridding,
fourier_coefficients = f_calc)
fft_map.apply_sigma_scaling()
map_model = fft_map.real_map_unpadded()
sites_cart = xrs.sites_cart()
cc_per_residue = flex.double()
for rg in ph.residue_groups():
cc = mmtbx.maps.correlation.from_map_map_atoms(
map_1 = map_data,
map_2 = map_model,
sites_cart = sites_cart.select(rg.atoms().extract_i_seq()),
unit_cell = xrs.unit_cell(),
radius = 2.)
cc_per_residue.append(cc)
show_histogram(data=cc_per_residue, n_slots=10, data_min=-1., data_max=1.0,
log=log)
#
"""
THIS IS NOT USED ANYWHERE BUT MIGHT BE USEFUL IN FUTURE, REMOVE LATER
def min_nonbonded_distance(sites_cart, geometry, xray_structure, selection):
selw = xray_structure.selection_within(radius = 3.0, selection =
flex.bool(xray_structure.scatterers().size(), selection)).iselection()
sites_cart_w = sites_cart.select(selw)
#
g = geometry.select(iselection=selw)
pair_proxy_list_sorted=[]
bond_proxies_simple, asu = g.get_all_bond_proxies(
sites_cart = sites_cart_w)
for proxy in bond_proxies_simple:
tmp = list(proxy.i_seqs)
tmp.sort()
pair_proxy_list_sorted.append(tmp)
pair_proxy_list_sorted.sort()
#
dist_min=999
i_min,j_min = None,None
for i, si in enumerate(sites_cart_w):
for j, sj in enumerate(sites_cart_w):
if(i<j):
p = [i,j]
p.sort()
if(not p in pair_proxy_list_sorted):
dist_ij = math.sqrt(
(si[0]-sj[0])**2+
(si[1]-sj[1])**2+
(si[2]-sj[2])**2)
if(dist_ij<dist_min):
dist_min = dist_ij
i_min,j_min = i, j
return i_min,j_min,dist_min
class residue_monitor(object):
def __init__(self,
residue,
id_str,
bond_rmsd=None,
angle_rmsd=None,
map_cc=None,
map_min=None,
map_mean=None,
rotamer_status=None,
ramachandran_status=None,
cbeta_status=None,
min_nonbonded=None):
adopt_init_args(self, locals())
def show(self):
print "%12s %6s %6s %6s %6s %6s %7s %9s %7s %7s"%(
self.id_str,
format_value("%6.3f",self.map_cc),
format_value("%5.2f",self.map_min),
format_value("%5.2f",self.map_mean),
format_value("%6.3f",self.bond_rmsd),
format_value("%6.2f",self.angle_rmsd),
format_value("%6.3f",self.min_nonbonded),
self.rotamer_status,
self.ramachandran_status,
self.cbeta_status)
class structure_monitor(object):
def __init__(self,
pdb_hierarchy,
xray_structure,
map_1, # map data
map_2,
geometry,
atom_radius):
adopt_init_args(self, locals())
self.unit_cell = self.xray_structure.unit_cell()
self.xray_structure = xray_structure.deep_copy_scatterers()
self.unit_cell = self.xray_structure.unit_cell()
self.rotamer_manager = RotamerEval()
#
sc1 = self.xray_structure.sites_cart()
sc2 = self.pdb_hierarchy.atoms().extract_xyz()
assert approx_equal(sc1, sc2, 1.e-3)
#
self.sites_cart = self.xray_structure.sites_cart()
self.sites_frac = self.xray_structure.sites_frac()
#
self.map_cc_whole_unit_cell = None
self.map_cc_around_atoms = None
self.map_cc_per_atom = None
self.rmsd_b = None
self.rmsd_a = None
self.dist_from_start = 0
self.dist_from_previous = 0
self.number_of_rotamer_outliers = 0
self.residue_monitors = None
#
ramalyze_obj = ramalyze(pdb_hierarchy=pdb_hierarchy, outliers_only=False)
self.rotamer_outlier_selection = ramalyze_obj.outlier_selection()
#
cbetadev_obj = cbetadev(
pdb_hierarchy = pdb_hierarchy,
outliers_only = False,
out = null_out())
self.cbeta_outlier_selection = cbetadev_obj.outlier_selection()
#
self.initialize()
def initialize(self):
# residue monitors
print " ID-------|MAP-----------------|RMSD----------|NONB-|ROTAMER--|RAMA---|CBETA--|"
print " |CC MIN MEAN |BOND ANGLE | | | | "
self.residue_monitors = []
sites_cart = self.xray_structure.sites_cart()
for model in self.pdb_hierarchy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
for conformer in residue_group.conformers():
for residue in conformer.residues():
id_str="%s,%s,%s"%(chain.id,residue.resname,residue.resseq.strip())
selection = residue.atoms().extract_i_seq()
cc = correlation.from_map_map_atoms(
map_1 = self.map_1,
map_2 = self.map_2,
sites_cart = self.sites_cart.select(selection),
unit_cell = self.unit_cell,
radius = self.atom_radius)
rotamer_status = self.rotamer_manager.evaluate_residue(residue)
grm = self.geometry.select(iselection=selection)
es = grm.energies_sites(sites_cart=residue.atoms().extract_xyz())
ramachandran_status="VALID"
if(selection[0] in self.rotamer_outlier_selection):
ramachandran_status="OUTLIER"
cbeta_status="VALID"
if(selection[0] in self.cbeta_outlier_selection):
cbeta_status="OUTLIER"
mnd = min_nonbonded_distance(
sites_cart = sites_cart,
geometry = self.geometry,
xray_structure = self.xray_structure,
selection = selection)
mi,me = self.map_values_min_mean(selection = selection)
rm = residue_monitor(
residue = residue,
id_str = id_str,
bond_rmsd = es.bond_deviations()[2],
angle_rmsd = es.angle_deviations()[2],
map_cc = cc,
map_min = mi,
map_mean = me,
min_nonbonded = mnd[2],
rotamer_status = rotamer_status,
ramachandran_status = ramachandran_status,
cbeta_status = cbeta_status)
self.residue_monitors.append(rm)
rm.show()
def show(self):
print " ID MAP CC BOND ANGLE NONB ROTAMER RAMA CBETA"
for rm in self.residue_monitors:
rm.show()
def map_values_min_mean(self, selection):
map_values = flex.double()
for i in selection:
mv = self.map_1.eight_point_interpolation(self.sites_frac[i])
map_values.append(mv)
mi,ma,me = map_values.min_max_mean().as_tuple()
return mi, me
def map_map_sites_cc(self, selection):
return correlation.from_map_map_atoms(
map_1 = self.map_1,
map_2 = self.map_2,
sites_cart = self.sites_cart.select(selection),
unit_cell = self.unit_cell,
radius = self.atom_radius)
"""
if (__name__ == "__main__"):
t0 = time.time()
run(args=sys.argv[1:])
print()
print("Time:", round(time.time()-t0, 3))
| 36.575269 | 94 | 0.646847 |
45df8aeb85f5138ed03b42971ddf67c5f18482e7 | 15,287 | py | Python | neuralprophet/plot_forecast.py | marco-mazzoli/neural_prophet | 151a1404e3141a3f6a77f04ab7ae02f6bf7810ac | [
"MIT"
] | 2,144 | 2020-06-12T00:50:31.000Z | 2022-03-31T13:51:30.000Z | neuralprophet/plot_forecast.py | marco-mazzoli/neural_prophet | 151a1404e3141a3f6a77f04ab7ae02f6bf7810ac | [
"MIT"
] | 306 | 2020-06-12T21:15:18.000Z | 2022-03-31T23:07:13.000Z | neuralprophet/plot_forecast.py | marco-mazzoli/neural_prophet | 151a1404e3141a3f6a77f04ab7ae02f6bf7810ac | [
"MIT"
] | 285 | 2020-08-21T00:42:14.000Z | 2022-03-29T12:21:59.000Z | import numpy as np
import pandas as pd
import logging
from neuralprophet.utils import set_y_as_percent
from neuralprophet.plot_model_parameters import plot_yearly, plot_weekly, plot_daily, plot_custom_season
log = logging.getLogger("NP.plotting")
try:
from matplotlib import pyplot as plt
from matplotlib.dates import (
MonthLocator,
num2date,
AutoDateLocator,
AutoDateFormatter,
)
from matplotlib.ticker import FuncFormatter
from pandas.plotting import deregister_matplotlib_converters
deregister_matplotlib_converters()
except ImportError:
log.error("Importing matplotlib failed. Plotting will not work.")
def plot(fcst, ax=None, xlabel="ds", ylabel="y", highlight_forecast=None, line_per_origin=False, figsize=(10, 6)):
"""Plot the NeuralProphet forecast
Args:
fcst (pd.DataFrame): output of m.predict.
ax (matplotlib axes): on which to plot.
xlabel (str): label name on X-axis
ylabel (str): label name on Y-axis
highlight_forecast (int): i-th step ahead forecast to highlight.
        line_per_origin (bool): plot one line per forecast origin (forecast age) instead of one line per horizon
figsize (tuple): width, height in inches.
Returns:
A matplotlib figure.
"""
fcst = fcst.fillna(value=np.nan)
if ax is None:
fig = plt.figure(facecolor="w", figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
ds = fcst["ds"].dt.to_pydatetime()
yhat_col_names = [col_name for col_name in fcst.columns if "yhat" in col_name]
if highlight_forecast is None or line_per_origin:
for i in range(len(yhat_col_names)):
ax.plot(
ds,
fcst["yhat{}".format(i + 1)],
ls="-",
c="#0072B2",
alpha=0.2 + 2.0 / (i + 2.5),
label="yhat{}".format(i + 1),
)
if highlight_forecast is not None:
if line_per_origin:
num_forecast_steps = sum(fcst["yhat1"].notna())
steps_from_last = num_forecast_steps - highlight_forecast
for i in range(len(yhat_col_names)):
x = ds[-(1 + i + steps_from_last)]
y = fcst["yhat{}".format(i + 1)].values[-(1 + i + steps_from_last)]
ax.plot(x, y, "bx")
else:
ax.plot(
ds, fcst["yhat{}".format(highlight_forecast)], ls="-", c="b", label="yhat{}".format(highlight_forecast)
)
ax.plot(ds, fcst["yhat{}".format(highlight_forecast)], "bx", label="yhat{}".format(highlight_forecast))
ax.plot(ds, fcst["y"], "k.", label="actual y")
# Specify formatting to workaround matplotlib issue #12925
locator = AutoDateLocator(interval_multiples=False)
formatter = AutoDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.grid(True, which="major", c="gray", ls="-", lw=1, alpha=0.2)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
handles, labels = ax.axes.get_legend_handles_labels()
if len(labels) > 10:
ax.legend(handles[:10] + [handles[-1]], labels[:10] + [labels[-1]])
log.warning("Legend is available only for the ten first handles")
else:
ax.legend(handles, labels)
fig.tight_layout()
return fig
def plot_components(m, fcst, forecast_in_focus=None, one_period_per_season=True, residuals=False, figsize=None):
"""Plot the NeuralProphet forecast components.
Args:
m (NeuralProphet): fitted model.
fcst (pd.DataFrame): output of m.predict.
forecast_in_focus (int): n-th step ahead forecast AR-coefficients to plot
one_period_per_season (bool): plot one period per season
instead of the true seasonal components of the forecast.
figsize (tuple): width, height in inches.
None (default): automatic (10, 3 * npanel)
Returns:
A matplotlib figure.
"""
log.debug("Plotting forecast components".format(fcst.head().to_string()))
fcst = fcst.fillna(value=np.nan)
# Identify components to be plotted
# as dict, minimum: {plot_name, comp_name}
components = []
# Plot trend
components.append({"plot_name": "Trend", "comp_name": "trend"})
# Plot seasonalities, if present
if m.model.config_season is not None:
for name in m.model.config_season.periods:
components.append(
{
"plot_name": "{} seasonality".format(name),
"comp_name": name,
}
)
# AR
if m.model.n_lags > 0:
if forecast_in_focus is None:
components.append(
{
"plot_name": "Auto-Regression",
"comp_name": "ar",
"num_overplot": m.n_forecasts,
"bar": True,
}
)
else:
components.append(
{
"plot_name": "AR ({})-ahead".format(forecast_in_focus),
"comp_name": "ar{}".format(forecast_in_focus),
}
)
# 'add_x': True})
# Add Covariates
if m.model.config_covar is not None:
for name in m.model.config_covar.keys():
if forecast_in_focus is None:
components.append(
{
"plot_name": 'Lagged Regressor "{}"'.format(name),
"comp_name": "lagged_regressor_{}".format(name),
"num_overplot": m.n_forecasts,
"bar": True,
}
)
else:
components.append(
{
"plot_name": 'Lagged Regressor "{}" ({})-ahead'.format(name, forecast_in_focus),
"comp_name": "lagged_regressor_{}{}".format(name, forecast_in_focus),
}
)
# 'add_x': True})
# Add Events
if "events_additive" in fcst.columns:
components.append(
{
"plot_name": "Additive Events",
"comp_name": "events_additive",
}
)
if "events_multiplicative" in fcst.columns:
components.append(
{
"plot_name": "Multiplicative Events",
"comp_name": "events_multiplicative",
"multiplicative": True,
}
)
# Add Regressors
if "future_regressors_additive" in fcst.columns:
components.append(
{
"plot_name": "Additive Future Regressors",
"comp_name": "future_regressors_additive",
}
)
if "future_regressors_multiplicative" in fcst.columns:
components.append(
{
"plot_name": "Multiplicative Future Regressors",
"comp_name": "future_regressors_multiplicative",
"multiplicative": True,
}
)
if residuals:
if forecast_in_focus is None and m.n_forecasts > 1:
if fcst["residual1"].count() > 0:
components.append(
{
"plot_name": "Residuals",
"comp_name": "residual",
"num_overplot": m.n_forecasts,
"bar": True,
}
)
else:
ahead = 1 if forecast_in_focus is None else forecast_in_focus
if fcst["residual{}".format(ahead)].count() > 0:
components.append(
{
"plot_name": "Residuals ({})-ahead".format(ahead),
"comp_name": "residual{}".format(ahead),
"bar": True,
}
)
npanel = len(components)
figsize = figsize if figsize else (10, 3 * npanel)
fig, axes = plt.subplots(npanel, 1, facecolor="w", figsize=figsize)
if npanel == 1:
axes = [axes]
multiplicative_axes = []
for ax, comp in zip(axes, components):
name = comp["plot_name"].lower()
if (
name in ["trend"]
or ("residuals" in name and "ahead" in name)
or ("ar" in name and "ahead" in name)
or ("lagged regressor" in name and "ahead" in name)
):
plot_forecast_component(fcst=fcst, ax=ax, **comp)
elif "event" in name or "future regressor" in name:
if "multiplicative" in comp.keys() and comp["multiplicative"]:
multiplicative_axes.append(ax)
plot_forecast_component(fcst=fcst, ax=ax, **comp)
elif "season" in name:
if m.season_config.mode == "multiplicative":
multiplicative_axes.append(ax)
if one_period_per_season:
comp_name = comp["comp_name"]
if comp_name.lower() == "weekly" or m.season_config.periods[comp_name].period == 7:
plot_weekly(m=m, ax=ax, comp_name=comp_name)
elif comp_name.lower() == "yearly" or m.season_config.periods[comp_name].period == 365.25:
plot_yearly(m=m, ax=ax, comp_name=comp_name)
elif comp_name.lower() == "daily" or m.season_config.periods[comp_name].period == 1:
plot_daily(m=m, ax=ax, comp_name=comp_name)
else:
plot_custom_season(m=m, ax=ax, comp_name=comp_name)
else:
comp_name = "season_{}".format(comp["comp_name"])
plot_forecast_component(fcst=fcst, ax=ax, comp_name=comp_name, plot_name=comp["plot_name"])
elif "auto-regression" in name or "lagged regressor" in name or "residuals" in name:
plot_multiforecast_component(fcst=fcst, ax=ax, **comp)
fig.tight_layout()
# Reset multiplicative axes labels after tight_layout adjustment
for ax in multiplicative_axes:
ax = set_y_as_percent(ax)
return fig
def plot_forecast_component(
fcst,
comp_name,
plot_name=None,
ax=None,
figsize=(10, 6),
multiplicative=False,
bar=False,
rolling=None,
add_x=False,
):
"""Plot a particular component of the forecast.
Args:
fcst (pd.DataFrame): output of m.predict.
comp_name (str): Name of the component to plot.
plot_name (str): Name of the plot Title.
ax (matplotlib axis): matplotlib Axes to plot on.
figsize (tuple): width, height in inches. Ignored if ax is not None.
default: (10, 6)
multiplicative (bool): set y axis as percentage
bar (bool): make barplot
rolling (int): rolling average underplot
add_x (bool): add x symbols to plotted points
Returns:
a list of matplotlib artists
"""
fcst = fcst.fillna(value=np.nan)
artists = []
if not ax:
fig = plt.figure(facecolor="w", figsize=figsize)
ax = fig.add_subplot(111)
fcst_t = fcst["ds"].dt.to_pydatetime()
if rolling is not None:
rolling_avg = fcst[comp_name].rolling(rolling, min_periods=1, center=True).mean()
if bar:
artists += ax.bar(fcst_t, rolling_avg, width=1.00, color="#0072B2", alpha=0.5)
else:
artists += ax.plot(fcst_t, rolling_avg, ls="-", color="#0072B2", alpha=0.5)
if add_x:
artists += ax.plot(fcst_t, fcst[comp_name], "bx")
y = fcst[comp_name].values
if "residual" in comp_name:
y[-1] = 0
if bar:
artists += ax.bar(fcst_t, y, width=1.00, color="#0072B2")
else:
artists += ax.plot(fcst_t, y, ls="-", c="#0072B2")
if add_x or sum(fcst[comp_name].notna()) == 1:
artists += ax.plot(fcst_t, y, "bx")
# Specify formatting to workaround matplotlib issue #12925
locator = AutoDateLocator(interval_multiples=False)
formatter = AutoDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.grid(True, which="major", c="gray", ls="-", lw=1, alpha=0.2)
ax.set_xlabel("ds")
if plot_name is None:
plot_name = comp_name
ax.set_ylabel(plot_name)
if multiplicative:
ax = set_y_as_percent(ax)
return artists
def plot_multiforecast_component(
fcst,
comp_name,
plot_name=None,
ax=None,
figsize=(10, 6),
multiplicative=False,
bar=False,
focus=1,
num_overplot=None,
):
"""Plot a particular component of the forecast.
Args:
fcst (pd.DataFrame): output of m.predict.
comp_name (str): Name of the component to plot.
plot_name (str): Name of the plot Title.
ax (matplotlib axis): matplotlib Axes to plot on.
figsize (tuple): width, height in inches. Ignored if ax is not None.
default: (10, 6)
multiplicative (bool): set y axis as percentage
bar (bool): make barplot
focus (int): forecast number to portray in detail.
num_overplot (int): overplot all forecasts up to num
None (default): only plot focus
Returns:
a list of matplotlib artists
"""
artists = []
if not ax:
fig = plt.figure(facecolor="w", figsize=figsize)
ax = fig.add_subplot(111)
fcst_t = fcst["ds"].dt.to_pydatetime()
col_names = [col_name for col_name in fcst.columns if col_name.startswith(comp_name)]
if num_overplot is not None:
assert num_overplot <= len(col_names)
for i in list(range(num_overplot))[::-1]:
y = fcst["{}{}".format(comp_name, i + 1)]
notnull = y.notnull()
y = y.values
alpha_min = 0.2
alpha_softness = 1.2
alpha = alpha_min + alpha_softness * (1.0 - alpha_min) / (i + 1.0 * alpha_softness)
if "residual" not in comp_name:
pass
# fcst_t=fcst_t[notnull]
# y = y[notnull]
else:
y[-1] = 0
if bar:
artists += ax.bar(fcst_t, y, width=1.00, color="#0072B2", alpha=alpha)
else:
artists += ax.plot(fcst_t, y, ls="-", color="#0072B2", alpha=alpha)
if num_overplot is None or focus > 1:
y = fcst["{}{}".format(comp_name, focus)]
notnull = y.notnull()
y = y.values
if "residual" not in comp_name:
fcst_t = fcst_t[notnull]
y = y[notnull]
else:
y[-1] = 0
if bar:
artists += ax.bar(fcst_t, y, width=1.00, color="b")
else:
artists += ax.plot(fcst_t, y, ls="-", color="b")
# Specify formatting to workaround matplotlib issue #12925
locator = AutoDateLocator(interval_multiples=False)
formatter = AutoDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.grid(True, which="major", color="gray", ls="-", lw=1, alpha=0.2)
ax.set_xlabel("ds")
if plot_name is None:
plot_name = comp_name
ax.set_ylabel(plot_name)
if multiplicative:
ax = set_y_as_percent(ax)
return artists
| 36.836145 | 119 | 0.566494 |
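# Editor's hedged usage sketch for the module-level plot() above: it only needs
# a DataFrame with a datetime "ds" column, an observed "y" column and one or
# more "yhat<i>" columns, so a small synthetic frame is enough to exercise it.
# All numbers below are made up.
import numpy as np
import pandas as pd

_ds = pd.date_range("2021-01-01", periods=60, freq="D")
_y = np.sin(np.arange(60) / 5.0)
_fcst = pd.DataFrame({"ds": _ds, "y": _y, "yhat1": _y + 0.1})
fig = plot(_fcst, xlabel="date", ylabel="value")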
af3cc33dcc492609cd131380aec042e6470f8125 | 2,225 | py | Python | PyFlow/Packages/PyFlowBase/Nodes/doOnce.py | luzpaz/PyFlow | e00642794051b1d9b7b2665eee38567e9763558d | [
"Apache-2.0"
] | 1,463 | 2019-07-29T15:45:22.000Z | 2022-03-31T23:32:13.000Z | PyFlow/Packages/PyFlowBase/Nodes/doOnce.py | luzpaz/PyFlow | e00642794051b1d9b7b2665eee38567e9763558d | [
"Apache-2.0"
] | 58 | 2019-07-31T07:58:57.000Z | 2022-02-23T05:46:08.000Z | PyFlow/Packages/PyFlowBase/Nodes/doOnce.py | luzpaz/PyFlow | e00642794051b1d9b7b2665eee38567e9763558d | [
"Apache-2.0"
] | 169 | 2019-08-03T16:38:57.000Z | 2022-03-31T14:20:12.000Z | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from PyFlow.Core import NodeBase
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from PyFlow.Core.Common import *
from PyFlow.Packages.PyFlowBase.Nodes import FLOW_CONTROL_COLOR
class doOnce(NodeBase):
def __init__(self, name):
super(doOnce, self).__init__(name)
self.inExec = self.createInputPin('inExec', 'ExecPin', None, self.compute)
self.reset = self.createInputPin('Reset', 'ExecPin', None, self.OnReset)
self.bStartClosed = self.createInputPin('Start closed', 'BoolPin')
self.completed = self.createOutputPin('Completed', 'ExecPin')
self.bClosed = False
self.headerColor = FLOW_CONTROL_COLOR
def OnReset(self):
self.bClosed = False
self.bStartClosed.setData(False)
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
helper.addInputDataType('ExecPin')
helper.addInputDataType('BoolPin')
helper.addOutputDataType('ExecPin')
helper.addInputStruct(StructureType.Single)
helper.addOutputStruct(StructureType.Single)
return helper
@staticmethod
def category():
return 'FlowControl'
@staticmethod
def keywords():
return []
@staticmethod
def description():
return 'Will fire off an execution pin just once. But can reset.'
def compute(self, *args, **kwargs):
bStartClosed = self.bStartClosed.getData()
if not self.bClosed and not bStartClosed:
self.completed.call(*args, **kwargs)
self.bClosed = True
self.bStartClosed.setData(False)
| 34.230769 | 82 | 0.694831 |
74b1dbb85158cd58e4467db965d09bc318813ab4 | 21,403 | py | Python | ginga/canvas/types/image.py | profxj/ginga | a5f447b760ac38dafa52181b3f99156545a6f2e7 | [
"BSD-3-Clause"
] | null | null | null | ginga/canvas/types/image.py | profxj/ginga | a5f447b760ac38dafa52181b3f99156545a6f2e7 | [
"BSD-3-Clause"
] | 2 | 2017-07-25T15:22:13.000Z | 2020-04-25T19:34:30.000Z | ginga/canvas/types/image.py | profxj/ginga | a5f447b760ac38dafa52181b3f99156545a6f2e7 | [
"BSD-3-Clause"
] | 3 | 2018-02-09T20:06:30.000Z | 2020-03-30T02:31:44.000Z | #
# images.py -- classes for images drawn on ginga canvases.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import time
import numpy as np
from ginga.canvas.CanvasObject import (CanvasObjectBase, _bool, _color,
Point, MovePoint,
register_canvas_types,
colors_plus_none, coord_names)
from ginga.misc.ParamSet import Param
from ginga.misc import Bunch
from ginga import trcalc
from .mixins import OnePointMixin
class ImageP(OnePointMixin, CanvasObjectBase):
"""Draws an image on a ImageViewCanvas.
Parameters are:
x, y: 0-based coordinates of one corner in the data space
image: the image, which must be an RGBImage object
"""
@classmethod
def get_params_metadata(cls):
return [
Param(name='coord', type=str, default='data',
valid=coord_names,
description="Set type of coordinates"),
Param(name='x', type=float, default=0.0, argpos=0,
description="X coordinate of corner of object"),
Param(name='y', type=float, default=0.0, argpos=1,
description="Y coordinate of corner of object"),
## Param(name='image', type=?, argpos=2,
## description="Image to be displayed on canvas"),
Param(name='scale_x', type=float, default=1.0,
description="Scaling factor for X dimension of object"),
Param(name='scale_y', type=float, default=1.0,
description="Scaling factor for Y dimension of object"),
Param(name='interpolation', type=str, default=None,
description="Interpolation method for scaling pixels"),
Param(name='linewidth', type=int, default=0,
min=0, max=20, widget='spinbutton', incr=1,
description="Width of outline"),
Param(name='linestyle', type=str, default='solid',
valid=['solid', 'dash'],
description="Style of outline (default: solid)"),
Param(name='color',
valid=colors_plus_none, type=_color, default='lightgreen',
description="Color of outline"),
Param(name='alpha', type=float, default=1.0,
min=0.0, max=1.0, widget='spinfloat', incr=0.05,
description="Opacity of outline"),
Param(name='showcap', type=_bool,
default=False, valid=[False, True],
description="Show caps for this object"),
Param(name='flipy', type=_bool,
default=False, valid=[False, True],
description="Flip image in Y direction"),
Param(name='optimize', type=_bool,
default=True, valid=[False, True],
description="Optimize rendering for this object"),
]
def __init__(self, pt, image, alpha=1.0, scale_x=1.0, scale_y=1.0,
interpolation=None,
linewidth=0, linestyle='solid', color='lightgreen',
showcap=False, flipy=False, optimize=True,
**kwdargs):
self.kind = 'image'
points = np.asarray([pt], dtype=np.float)
CanvasObjectBase.__init__(self, points=points, image=image, alpha=alpha,
scale_x=scale_x, scale_y=scale_y,
interpolation=interpolation,
linewidth=linewidth, linestyle=linestyle,
color=color, showcap=showcap,
flipy=flipy, optimize=optimize,
**kwdargs)
OnePointMixin.__init__(self)
# The cache holds intermediate step results by viewer.
# Depending on value of `whence` they may not need to be recomputed.
self._cache = {}
self._zorder = 0
# images are not editable by default
self.editable = False
self.enable_callback('image-set')
def get_zorder(self):
return self._zorder
def set_zorder(self, zorder):
self._zorder = zorder
for viewer in self._cache:
viewer.reorder_layers()
viewer.redraw(whence=2)
def in_cache(self, viewer):
return viewer in self._cache
def get_cache(self, viewer):
if viewer in self._cache:
cache = self._cache[viewer]
else:
cache = self._reset_cache(Bunch.Bunch())
self._cache[viewer] = cache
return cache
def invalidate_cache(self, viewer):
cache = self.get_cache(viewer)
self._reset_cache(cache)
return cache
def draw(self, viewer):
"""General draw method for RGB image types.
Note that actual insertion of the image into the output is
handled in `draw_image()`
"""
cache = self.get_cache(viewer)
if not cache.drawn:
cache.drawn = True
viewer.redraw(whence=2)
cpoints = self.get_cpoints(viewer)
cr = viewer.renderer.setup_cr(self)
# draw optional border
if self.linewidth > 0:
cr.draw_polygon(cpoints)
if self.showcap:
self.draw_caps(cr, self.cap, cpoints)
def draw_image(self, viewer, dstarr, whence=0.0):
if self.image is None:
return
t1 = t2 = time.time()
cache = self.get_cache(viewer)
self._common_draw(viewer, dstarr, cache, whence)
if cache.cutout is None:
return
t2 = time.time()
dst_order = viewer.get_rgb_order()
image_order = self.image.get_order()
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cache.cvs_pos, cache.cutout,
dst_order=dst_order, src_order=image_order,
alpha=self.alpha, fill=True, flipy=False)
t3 = time.time()
self.logger.debug("draw: t2=%.4f t3=%.4f total=%.4f" % (
t2 - t1, t3 - t2, t3 - t1))
def _common_draw(self, viewer, dstarr, cache, whence):
# internal common drawing phase for all images
if self.image is None:
return
if (whence <= 0.0) or (cache.cutout is None) or (not self.optimize):
# get extent of our data coverage in the window
pts = np.asarray(viewer.get_draw_rect()).T
xmin = int(np.min(pts[0]))
ymin = int(np.min(pts[1]))
xmax = int(np.ceil(np.max(pts[0])))
ymax = int(np.ceil(np.max(pts[1])))
# get destination location in data_coords
dst_x, dst_y = self.crdmap.to_data((self.x, self.y))
a1, b1, a2, b2 = 0, 0, self.image.width - 1, self.image.height - 1
# calculate the cutout that we can make and scale to merge
# onto the final image--by only cutting out what is necessary
# this speeds scaling greatly at zoomed in sizes
((dst_x, dst_y), (a1, b1), (a2, b2)) = \
trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
(dst_x, dst_y),
(a1, b1), (a2, b2))
# is image completely off the screen?
if (a2 - a1 <= 0) or (b2 - b1 <= 0):
# no overlay needed
cache.cutout = None
return
# cutout and scale the piece appropriately by the viewer scale
scale_x, scale_y = viewer.get_scale_xy()
# scale additionally by our scale
_scale_x, _scale_y = scale_x * self.scale_x, scale_y * self.scale_y
interp = self.interpolation
if interp is None:
t_ = viewer.get_settings()
interp = t_.get('interpolation', 'basic')
# previous choice might not be available if preferences
# were saved when opencv was being used (and not used now);
# if so, silently default to "basic"
if interp not in trcalc.interpolation_methods:
interp = 'basic'
res = self.image.get_scaled_cutout2((a1, b1), (a2, b2),
(_scale_x, _scale_y),
method=interp)
data = res.data
if self.flipy:
data = np.flipud(data)
cache.cutout = data
# calculate our offset from the pan position
pan_x, pan_y = viewer.get_pan()
pan_off = viewer.data_off
pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
off_x, off_y = dst_x - pan_x, dst_y - pan_y
# scale offset
off_x *= scale_x
off_y *= scale_y
# dst position in the pre-transformed array should be calculated
# from the center of the array plus offsets
ht, wd, dp = dstarr.shape
cvs_x = int(np.round(wd / 2.0 + off_x))
cvs_y = int(np.round(ht / 2.0 + off_y))
cache.cvs_pos = (cvs_x, cvs_y)
def _reset_cache(self, cache):
cache.setvals(cutout=None, drawn=False, cvs_pos=(0, 0))
return cache
def reset_optimize(self):
for cache in self._cache.values():
self._reset_cache(cache)
def get_image(self):
return self.image
def set_image(self, image):
self.image = image
self.reset_optimize()
self.make_callback('image-set', image)
def get_scaled_wdht(self):
width = int(self.image.width * self.scale_x)
height = int(self.image.height * self.scale_y)
return (width, height)
def get_coords(self):
x1, y1 = self.crdmap.to_data((self.x, self.y))
wd, ht = self.get_scaled_wdht()
x2, y2 = x1 + wd - 1, y1 + ht - 1
return (x1, y1, x2, y2)
def get_llur(self):
return self.get_coords()
def get_center_pt(self):
wd, ht = self.get_scaled_wdht()
x1, y1, x2, y2 = self.get_coords()
return ((x1 + x2) / 2.0, (y1 + y2) / 2.0)
def get_points(self):
x1, y1, x2, y2 = self.get_coords()
return [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
def contains_pts(self, pts):
x_arr, y_arr = np.asarray(pts).T
x1, y1, x2, y2 = self.get_llur()
contains = np.logical_and(
np.logical_and(x1 <= x_arr, x_arr <= x2),
np.logical_and(y1 <= y_arr, y_arr <= y2))
return contains
def rotate(self, theta, xoff=0, yoff=0):
raise ValueError("Images cannot be rotated")
def setup_edit(self, detail):
detail.center_pos = self.get_center_pt()
detail.scale_x = self.scale_x
detail.scale_y = self.scale_y
def set_edit_point(self, i, pt, detail):
if i == 0:
self.move_to_pt(pt)
elif i == 1:
scale_x, scale_y = self.calc_dual_scale_from_pt(pt, detail)
self.scale_x = detail.scale_x * scale_x
elif i == 2:
scale_x, scale_y = self.calc_dual_scale_from_pt(pt, detail)
self.scale_y = detail.scale_y * scale_y
elif i == 3:
scale_x, scale_y = self.calc_dual_scale_from_pt(pt, detail)
self.scale_x = detail.scale_x * scale_x
self.scale_y = detail.scale_y * scale_y
else:
raise ValueError("No point corresponding to index %d" % (i))
self.reset_optimize()
def get_edit_points(self, viewer):
x1, y1, x2, y2 = self.get_coords()
return [MovePoint(*self.get_center_pt()), # location
Point(x2, (y1 + y2) / 2.), # width scale
Point((x1 + x2) / 2., y2), # height scale
Point(x2, y2), # both scale
]
def scale_by_factors(self, factors):
scale_x, scale_y = factors[:2]
self.scale_x *= scale_x
self.scale_y *= scale_y
self.reset_optimize()
def set_scale(self, scale_x, scale_y):
self.scale_x = scale_x
self.scale_y = scale_y
self.reset_optimize()
def set_origin(self, x, y):
self.x, self.y = x, y
self.reset_optimize()
class Image(ImageP):
def __init__(self, x, y, image, alpha=1.0, scale_x=1.0, scale_y=1.0,
interpolation=None,
linewidth=0, linestyle='solid', color='lightgreen',
showcap=False, flipy=False, optimize=True,
**kwdargs):
ImageP.__init__(self, (x, y), image, alpha=alpha,
scale_x=scale_x, scale_y=scale_y,
interpolation=interpolation,
linewidth=linewidth, linestyle=linestyle,
color=color, showcap=showcap,
flipy=flipy, optimize=optimize,
**kwdargs)
class NormImageP(ImageP):
"""Draws an image on a ImageViewCanvas.
Parameters are:
x, y: 0-based coordinates of one corner in the data space
image: the image, which must be an RGBImage object
"""
@classmethod
def get_params_metadata(cls):
return [
Param(name='coord', type=str, default='data',
valid=coord_names,
description="Set type of coordinates"),
Param(name='x', type=float, default=0.0, argpos=0,
description="X coordinate of corner of object"),
Param(name='y', type=float, default=0.0, argpos=1,
description="Y coordinate of corner of object"),
## Param(name='image', type=?, argpos=2,
## description="Image to be displayed on canvas"),
Param(name='scale_x', type=float, default=1.0,
description="Scaling factor for X dimension of object"),
Param(name='scale_y', type=float, default=1.0,
description="Scaling factor for Y dimension of object"),
Param(name='interpolation', type=str, default=None,
description="Interpolation method for scaling pixels"),
Param(name='linewidth', type=int, default=0,
min=0, max=20, widget='spinbutton', incr=1,
description="Width of outline"),
Param(name='linestyle', type=str, default='solid',
valid=['solid', 'dash'],
description="Style of outline (default: solid)"),
Param(name='color',
valid=colors_plus_none, type=_color, default='lightgreen',
description="Color of outline"),
Param(name='alpha', type=float, default=1.0,
min=0.0, max=1.0, widget='spinfloat', incr=0.05,
description="Opacity of outline"),
Param(name='showcap', type=_bool,
default=False, valid=[False, True],
description="Show caps for this object"),
## Param(name='flipy', type=_bool,
## default=False, valid=[False, True],
## description="Flip image in Y direction"),
Param(name='optimize', type=_bool,
default=True, valid=[False, True],
description="Optimize rendering for this object"),
## Param(name='cuts', type=tuple, default=None,
## description="Tuple of (lo, hi) cut levels for image"),
## Param(name='rgbmap', type=?,
## description="RGB mapper for the image"),
## Param(name='autocuts', type=?,
## description="Cuts manager for the image"),
]
def __init__(self, pt, image, alpha=1.0, scale_x=1.0, scale_y=1.0,
interpolation=None, cuts=None, linewidth=0, linestyle='solid',
color='lightgreen', showcap=False,
optimize=True, rgbmap=None, autocuts=None, **kwdargs):
self.kind = 'normimage'
super(NormImageP, self).__init__(pt, image, alpha=alpha,
scale_x=scale_x, scale_y=scale_y,
interpolation=interpolation,
linewidth=linewidth, linestyle=linestyle,
color=color,
showcap=showcap, optimize=optimize,
**kwdargs)
self.rgbmap = rgbmap
self.cuts = cuts
self.autocuts = autocuts
def draw_image(self, viewer, dstarr, whence=0.0):
if self.image is None:
return
t1 = t2 = t3 = t4 = time.time()
cache = self.get_cache(viewer)
self._common_draw(viewer, dstarr, cache, whence)
if cache.cutout is None:
return
t2 = time.time()
if self.rgbmap is not None:
rgbmap = self.rgbmap
else:
rgbmap = viewer.get_rgbmap()
image_order = self.image.get_order()
if (whence <= 0.0) or (not self.optimize):
# if image has an alpha channel, then strip it off and save
# it until it is recombined later with the colorized output
# this saves us having to deal with an alpha band in the
# cuts leveling and RGB mapping routines
img_arr = cache.cutout
if 'A' not in image_order:
cache.alpha = None
else:
# normalize alpha array to the final output range
mn, mx = trcalc.get_minmax_dtype(img_arr.dtype)
a_idx = image_order.index('A')
cache.alpha = (img_arr[..., a_idx] / mx *
rgbmap.maxc).astype(rgbmap.dtype)
cache.cutout = img_arr[..., 0:a_idx]
if (whence <= 1.0) or (cache.prergb is None) or (not self.optimize):
# apply visual changes prior to color mapping (cut levels, etc)
vmax = rgbmap.get_hash_size() - 1
newdata = self.apply_visuals(viewer, cache.cutout, 0, vmax)
# result becomes an index array fed to the RGB mapper
if not np.issubdtype(newdata.dtype, np.dtype('uint')):
newdata = newdata.astype(np.uint)
idx = newdata
self.logger.debug("shape of index is %s" % (str(idx.shape)))
cache.prergb = idx
t3 = time.time()
dst_order = viewer.get_rgb_order()
if (whence <= 2.0) or (cache.rgbarr is None) or (not self.optimize):
# get RGB mapped array
rgbobj = rgbmap.get_rgbarray(cache.prergb, order=dst_order,
image_order=image_order)
cache.rgbarr = rgbobj.get_array(dst_order)
if cache.alpha is not None and 'A' in dst_order:
a_idx = dst_order.index('A')
cache.rgbarr[..., a_idx] = cache.alpha
t4 = time.time()
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cache.cvs_pos, cache.rgbarr,
dst_order=dst_order, src_order=dst_order,
alpha=self.alpha, fill=True, flipy=False)
t5 = time.time()
self.logger.debug("draw: t2=%.4f t3=%.4f t4=%.4f t5=%.4f total=%.4f" % (
t2 - t1, t3 - t2, t4 - t3, t5 - t4, t5 - t1))
def apply_visuals(self, viewer, data, vmin, vmax):
if self.autocuts is not None:
autocuts = self.autocuts
else:
autocuts = viewer.autocuts
# Apply cut levels
if self.cuts is not None:
loval, hival = self.cuts
else:
loval, hival = viewer.t_['cuts']
newdata = autocuts.cut_levels(data, loval, hival,
vmin=vmin, vmax=vmax)
return newdata
def _reset_cache(self, cache):
cache.setvals(cutout=None, alpha=None, prergb=None, rgbarr=None,
drawn=False, cvs_pos=(0, 0))
return cache
def set_image(self, image):
self.image = image
self.reset_optimize()
self.make_callback('image-set', image)
def scale_by(self, scale_x, scale_y):
self.scale_x *= scale_x
self.scale_y *= scale_y
self.reset_optimize()
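# A minimal usage sketch for NormImageP: `some_image` and `another_image` below
# are placeholders for ginga image objects and are not defined in this module;
# the point coordinates and parameter values are likewise illustrative.
#
#     img_item = NormImageP((100, 200), some_image, alpha=0.8,
#                           cuts=(10, 2000), optimize=True)
#     img_item.scale_by(2.0, 2.0)        # doubles scale_x / scale_y
#     img_item.set_image(another_image)  # swaps image, fires 'image-set'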
class NormImage(NormImageP):
def __init__(self, x, y, image, alpha=1.0, scale_x=1.0, scale_y=1.0,
interpolation=None, cuts=None, linewidth=0, linestyle='solid',
color='lightgreen', showcap=False,
optimize=True, rgbmap=None, autocuts=None, **kwdargs):
NormImageP.__init__(self, (x, y), image, alpha=alpha,
scale_x=scale_x, scale_y=scale_y,
interpolation=interpolation,
linewidth=linewidth, linestyle=linestyle,
color=color, showcap=showcap, optimize=optimize,
**kwdargs)
# register our types
register_canvas_types(dict(image=Image, normimage=NormImage))
#END
| 39.127971 | 82 | 0.545952 |
7a3dab0c90a4251f5762053ed8ed15c5aadd62f6 | 849 | py | Python | search/util.py | casperschmit/cct-selector | 46d6e7524fa0d53fd389d136ef33fa92b20e4c03 | [
"MIT"
] | null | null | null | search/util.py | casperschmit/cct-selector | 46d6e7524fa0d53fd389d136ef33fa92b20e4c03 | [
"MIT"
] | null | null | null | search/util.py | casperschmit/cct-selector | 46d6e7524fa0d53fd389d136ef33fa92b20e4c03 | [
"MIT"
] | null | null | null | import spacy
import re
from gensim.parsing.preprocessing import remove_stopwords, strip_multiple_whitespaces
# Also function used in search package
def split_content(content):
sentence_structure = re.compile(r'([A-Z\xc4\xc5\xd6][^\.!?]*[\.!?])', re.M)
sentences = sentence_structure.findall(content)
return sentences
# Process content for search
def process_content(content, nlp):
temp = strip_multiple_whitespaces(content) # Strip \n \t and other whitespace chars
temp = remove_stopwords(temp) # Remove stop words: if, a, with etc.
# Increase max length of nlp if text is too long. We do not need parser and ner for lemmatizing, so it's ok.
if len(temp) > 1000000:
nlp.max_length = len(temp) + 100
doc = nlp(temp) # Lemmatize words
temp = " ".join([token.lemma_ for token in doc])
return temp
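# Example sketch showing how the helpers above might be chained. The model name
# "en_core_web_sm" and the disabled pipeline components are assumptions; any
# installed spaCy model that provides lemmas should work.
def _example_pipeline(raw_text):
    nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
    sentences = split_content(raw_text)  # crude sentence split
    return [process_content(sentence, nlp) for sentence in sentences]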
| 36.913043 | 112 | 0.709069 |
196822d6359430481b373df03bbf9f842144e83f | 3,356 | py | Python | examples/Redfish/ex22_dump_iml.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | 27 | 2015-04-07T13:44:20.000Z | 2016-03-26T01:23:58.000Z | examples/Redfish/ex22_dump_iml.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | 5 | 2017-05-11T23:36:34.000Z | 2018-05-27T09:11:17.000Z | examples/Redfish/ex22_dump_iml.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | 13 | 2015-03-25T19:03:36.000Z | 2016-03-11T13:21:18.000Z | # Copyright 2016 Hewlett Packard Enterprise Development, LP.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _redfishobject import RedfishObject
from redfish.rest.v1 import ServerDownOrUnreachableError
def ex22_dump_iml(redfishobj):
sys.stdout.write("\nEXAMPLE 22: Dump Integrated Management Log\n")
instances = redfishobj.search_for_type("LogService.")
for instance in instances:
if instance["@odata.id"].endswith("IML/"):
tmp = redfishobj.redfish_get(instance["@odata.id"])
rsp = redfishobj.redfish_get(tmp.dict["Entries"]["@odata.id"])
for entry in rsp.dict["Members"]:
response = redfishobj.redfish_get(entry["@odata.id"])
if redfishobj.typepath.defs.isgen9:
oemhpdict = response.dict["Oem"]["Hp"]
else:
oemhpdict = response.dict["Oem"]["Hpe"]
sys.stdout.write(response.dict["Severity"] + ": Class " + \
str(oemhpdict["Class"]) + " / Code " + str(oemhpdict["Code"]) + \
":\t" + response.dict["Message"] + "\n")
while 'NextPage' in rsp.dict["Members"]:
response = redfishobj.redfish_get(entry["@odata.id"] + \
'?page=' + \
str(response.dict["Entries"] \
['NextPage']['page']))
sys.stdout.write(response.dict["Severity"] + ": Class " + \
str(oemhpdict["Class"]) + " / Code " + str(oemhpdict["Code"]) \
+ ":\t" + response.dict["Message"] + "\n")
redfishobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
    except ServerDownOrUnreachableError as excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
    except Exception as excp:
raise excp
ex22_dump_iml(REDFISH_OBJ)
| 44.746667 | 89 | 0.568832 |
dfb03ed0f14633e9290e89ccad9ca0a47ed4fd4a | 506 | py | Python | scripts/simple_collectible/show_tokenuri.py | litvinaugang/nft-ipfs-metamask | 7b48cecb337a1c5abbd19b425d0ddcd0e9f5445e | [
"MIT"
] | null | null | null | scripts/simple_collectible/show_tokenuri.py | litvinaugang/nft-ipfs-metamask | 7b48cecb337a1c5abbd19b425d0ddcd0e9f5445e | [
"MIT"
] | null | null | null | scripts/simple_collectible/show_tokenuri.py | litvinaugang/nft-ipfs-metamask | 7b48cecb337a1c5abbd19b425d0ddcd0e9f5445e | [
"MIT"
] | null | null | null | from brownie import ArtBot, accounts, network, config
from metadata import sample_metadata
from scripts.helpful_scripts import OPENSEA_FORMAT
def main():
print("Working on " + network.show_active())
simple_collectible = ArtBot[len(ArtBot) - 1]
number_of_tokens = simple_collectible.tokenIdCounter()
print("number_of_tokens deployed is {}".format(number_of_tokens))
for token_id in range(1,number_of_tokens):
print("token uri: {}".format(simple_collectible.tokenURI(token_id))) | 42.166667 | 76 | 0.76087 |
8eb1040dc779c42036c5c4946cfa45bf03eb95bd | 8,187 | py | Python | pandapower/plotting/plotly/vlevel_plotly.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | 1 | 2019-06-16T05:06:03.000Z | 2019-06-16T05:06:03.000Z | pandapower/plotting/plotly/vlevel_plotly.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | null | null | null | pandapower/plotting/plotly/vlevel_plotly.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | 1 | 2022-02-07T14:11:03.000Z | 2022-02-07T14:11:03.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
from pandapower.plotting.generic_geodata import create_generic_coordinates
from pandapower.plotting.plotly.traces import create_bus_trace, create_line_trace, create_trafo_trace, draw_traces, \
version_check
from pandapower.plotting.plotly.get_colors import get_plotly_color_palette
from pandapower.plotting.plotly.mapbox_plot import *
from pandapower.topology import create_nxgraph, connected_components
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def vlevel_plotly(net, respect_switches=True, use_line_geodata=None, colors_dict=None, on_map=False,
projection=None, map_style='basic', figsize=1, aspectratio='auto', line_width=2,
bus_size=10):
"""
Plots a pandapower network in plotly
using lines/buses colors according to the voltage level they belong to.
If no geodata is available, artificial geodata is generated. For advanced plotting see the tutorial
INPUT:
**net** - The pandapower format network. If none is provided, mv_oberrhein() will be
plotted as an example
OPTIONAL:
**respect_switches** (bool, True) - Respect switches when artificial geodata is created
        **use_line_geodata** (bool, True) - defines if line patches are based on net.line_geodata of the lines (True)
            or on net.bus_geodata of the connected buses (False)
        **colors_dict** (dict, None) - dictionary for customization of colors for each voltage level in the form:
            voltage_kv : color
        **on_map** (bool, False) - enables using mapbox plot in plotly. If the provided geodata are not real
            geo-coordinates in lon/lat form, on_map will be set to False.
**projection** (String, None) - defines a projection from which network geo-data will be transformed to
lat-long. For each projection a string can be found at http://spatialreference.org/ref/epsg/
        **map_style** (str, 'basic') - style of the mapbox map (used only when on_map is True)
- 'streets'
- 'bright'
- 'light'
- 'dark'
- 'satellite'
        **figsize** (float, 1) - aspectratio is multiplied by this factor in order to get the final image size
        **aspectratio** (tuple, 'auto') - when 'auto' it preserves the original aspect ratio of the network geodata;
            any custom aspect ratio can be given as a tuple, e.g. (1.2, 1)
**line_width** (float, 1.0) - width of lines
**bus_size** (float, 10.0) - size of buses to plot.
"""
version_check()
# create geocoord if none are available
if 'line_geodata' not in net:
net.line_geodata = pd.DataFrame(columns=['coords'])
if 'bus_geodata' not in net:
net.bus_geodata = pd.DataFrame(columns=["x", "y"])
if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:
logger.warning("No or insufficient geodata available --> Creating artificial coordinates." +
" This may take some time")
create_generic_coordinates(net, respect_switches=respect_switches)
if on_map:
logger.warning("Map plots not available with artificial coordinates and will be disabled!")
on_map = False
    # check if geodata are real geographical lat/lon coordinates using geopy
if on_map and projection is not None:
geo_data_to_latlong(net, projection=projection)
# if bus geodata is available, but no line geodata
if use_line_geodata is None:
use_line_geodata = False if len(net.line_geodata) == 0 else True
elif use_line_geodata and len(net.line_geodata) == 0:
logger.warning("No or insufficient line geodata available --> only bus geodata will be used.")
use_line_geodata = False
    # getting connected components without consideration of trafos
graph = create_nxgraph(net, include_trafos=False)
vlev_buses = connected_components(graph)
# getting unique sets of buses for each voltage level
vlev_bus_dict = {}
for vl_buses in vlev_buses:
if net.bus.loc[vl_buses, 'vn_kv'].unique().shape[0] > 1:
            logger.warning('buses from the same voltage level do not have the same vn_kv !?')
vn_kv = net.bus.loc[vl_buses, 'vn_kv'].unique()[0]
if vlev_bus_dict.get(vn_kv):
vlev_bus_dict[vn_kv].update(vl_buses)
else:
vlev_bus_dict[vn_kv] = vl_buses
    # create a default colormap for voltage levels unless custom colors were provided
    if colors_dict is None:
        nvlevs = len(vlev_bus_dict)
        colors = get_plotly_color_palette(nvlevs)
        colors_dict = dict(zip(vlev_bus_dict.keys(), colors))
# creating traces for buses and lines for each voltage level
bus_traces = []
line_traces = []
for vn_kv, buses_vl in vlev_bus_dict.items():
vlev_color = colors_dict[vn_kv]
bus_trace_vlev = create_bus_trace(net, buses=buses_vl, size=bus_size, legendgroup=str(vn_kv),
color=vlev_color, trace_name='buses {0} kV'.format(vn_kv))
if bus_trace_vlev is not None:
bus_traces += bus_trace_vlev
vlev_lines = net.line[net.line.from_bus.isin(buses_vl) & net.line.to_bus.isin(buses_vl)].index.tolist()
line_trace_vlev = create_line_trace(net, lines=vlev_lines, use_line_geodata=use_line_geodata,
respect_switches=respect_switches, legendgroup=str(vn_kv),
color=vlev_color, width=line_width, trace_name='lines {0} kV'.format(vn_kv))
if line_trace_vlev is not None:
line_traces += line_trace_vlev
trafo_traces = create_trafo_trace(net, color='gray', width=line_width * 2)
draw_traces(line_traces + trafo_traces + bus_traces, showlegend=True,
aspectratio=aspectratio, on_map=on_map, map_style=map_style, figsize=figsize)
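# Example call with explicit per-voltage-level colors and a fixed aspect ratio
# (a sketch: the 20.0 / 0.4 kV keys and the color names are assumptions and must
# match the vn_kv levels actually present in the network):
#
#     vlevel_plotly(net, colors_dict={20.0: "blue", 0.4: "green"},
#                   aspectratio=(1.2, 1), line_width=1.5, bus_size=8)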
if __name__ == '__main__':
from pandapower.plotting.plotly import simple_plotly
from pandapower.networks import mv_oberrhein
from pandapower import runpp
net = mv_oberrhein()
vlevel_plotly(net)
runpp(net)
line_width=2
bus_size=10
use_line_geodata = None
graph = create_nxgraph(net, include_trafos=False)
vlev_buses = connected_components(graph)
respect_switches = True
# getting unique sets of buses for each voltage level
vlev_bus_dict = {}
for vl_buses in vlev_buses:
if net.bus.loc[vl_buses, 'vn_kv'].unique().shape[0] > 1:
            logger.warning('buses from the same voltage level do not have the same vn_kv !?')
vn_kv = net.bus.loc[vl_buses, 'vn_kv'].unique()[0]
if vlev_bus_dict.get(vn_kv):
vlev_bus_dict[vn_kv].update(vl_buses)
else:
vlev_bus_dict[vn_kv] = vl_buses
# create a default colormap for voltage levels
nvlevs = len(vlev_bus_dict)
colors = get_plotly_color_palette(nvlevs)
colors_dict = dict(zip(vlev_bus_dict.keys(), colors))
# creating traces for buses and lines for each voltage level
bus_traces = []
line_traces = []
for vn_kv, buses_vl in vlev_bus_dict.items():
vlev_color = colors_dict[vn_kv]
bus_trace_vlev = create_bus_trace(net, buses=buses_vl, size=bus_size, legendgroup=str(vn_kv),
color=vlev_color, trace_name='buses {0} kV'.format(vn_kv))
if bus_trace_vlev is not None:
bus_traces += bus_trace_vlev
vlev_lines = net.line[net.line.from_bus.isin(buses_vl) & net.line.to_bus.isin(buses_vl)].index.tolist()
print(vlev_lines)
line_trace_vlev = create_line_trace(net, lines=vlev_lines, use_line_geodata=use_line_geodata,
respect_switches=respect_switches, legendgroup=str(vn_kv),
color="r", width=line_width, trace_name='lines {0} kV'.format(vn_kv))
if line_trace_vlev is not None:
line_traces += line_trace_vlev
| 44.737705 | 120 | 0.670942 |
b2441f8881f119997863cbae1a54fb3a96a7993c | 1,930 | py | Python | src/fuse_eval_mpeg.py | mauriceqch/pcc_geo_cnn | 22bbf081ffe7b77c9308f54c15490da60e78803c | [
"MIT"
] | 46 | 2019-06-17T21:13:57.000Z | 2022-03-29T07:52:11.000Z | src/fuse_eval_mpeg.py | mauriceqch/pcc_geo_cnn | 22bbf081ffe7b77c9308f54c15490da60e78803c | [
"MIT"
] | 11 | 2019-07-05T09:51:08.000Z | 2022-02-06T14:00:03.000Z | src/fuse_eval_mpeg.py | mauriceqch/pcc_geo_cnn | 22bbf081ffe7b77c9308f54c15490da60e78803c | [
"MIT"
] | 14 | 2019-04-10T01:09:36.000Z | 2022-03-30T01:24:57.000Z | ################################################################################
### Init
################################################################################
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
import os
import argparse
import pandas as pd
################################################################################
### Script
################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='fuse_eval_mpeg.py',
description='Fusion of eval results with custom MPEG anchor output.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'eval_file',
help='Evaluation file.')
parser.add_argument(
'mpeg_eval_file',
help='MPEG intra evaluation file.')
parser.add_argument(
'output_file',
        help='Output CSV file for the fused evaluation results.')
logger.info("Processing started.")
args = parser.parse_args()
assert os.path.exists(args.eval_file), "Eval file not found"
assert os.path.exists(args.mpeg_eval_file), "MPEG eval file not found"
eval_df = pd.read_csv(args.eval_file)
mpeg_eval_df = pd.read_csv(args.mpeg_eval_file, delimiter=";")
ori_file_prefix = os.path.commonprefix(list(eval_df.ori_file))
eval_df['filename'] = eval_df.ori_file.map(lambda s: s[len(ori_file_prefix):])
eval_df.set_index("filename", inplace=True)
mpeg_eval_df.set_index("filename", inplace=True)
fused_df = eval_df.join(mpeg_eval_df, on="filename")
fused_df['bpov'] = (fused_df['byte_count_octree_layer'] * 8 / fused_df['n_points_ori'])
fused_df.to_csv(args.output_file)
logger.info("Processing done.")
| 35.740741 | 91 | 0.570466 |
a035d48566831f781be6f57b10078b0f8adcdb31 | 31,318 | py | Python | metaci/build/models.py | cidarm/MetaCI | bef250b1c8a9fb14e7dd950e90b40fdba71405f8 | [
"BSD-3-Clause"
] | null | null | null | metaci/build/models.py | cidarm/MetaCI | bef250b1c8a9fb14e7dd950e90b40fdba71405f8 | [
"BSD-3-Clause"
] | 504 | 2020-08-30T05:09:11.000Z | 2021-03-24T05:40:01.000Z | metaci/build/models.py | cidarm/MetaCI | bef250b1c8a9fb14e7dd950e90b40fdba71405f8 | [
"BSD-3-Clause"
] | null | null | null | import json
import os
import shutil
import sys
import tempfile
import traceback
import zipfile
from glob import iglob
from io import BytesIO
from cumulusci import __version__ as cumulusci_version
from cumulusci.core.config import FAILED_TO_CREATE_SCRATCH_ORG
from cumulusci.core.exceptions import (
ApexTestException,
BrowserTestFailure,
RobotTestFailure,
ScratchOrgException,
)
from cumulusci.core.flowrunner import FlowCoordinator
from cumulusci.salesforce_api.exceptions import MetadataComponentFailure
from cumulusci.utils import elementtree_parse_file
from django.apps import apps
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.http import Http404
from django.urls import reverse
from django.utils import timezone
from jinja2.sandbox import ImmutableSandboxedEnvironment
from metaci.build.tasks import set_github_status
from metaci.build.utils import format_log, set_build_info
from metaci.cumulusci.config import MetaCIUniversalConfig
from metaci.cumulusci.keychain import MetaCIProjectKeychain
from metaci.cumulusci.logger import init_logger
from metaci.release.utils import (
send_release_webhook,
send_start_webhook,
send_stop_webhook,
)
from metaci.testresults.importer import import_test_results
from metaci.utils import generate_hash
BUILD_STATUSES = (
("queued", "Queued"),
("waiting", "Waiting"),
("running", "Running"),
("success", "Success"),
("error", "Error"),
("fail", "Failed"),
("qa", "QA Testing"),
)
BUILD_FLOW_STATUSES = (
("queued", "Queued"),
("running", "Running"),
("success", "Success"),
("error", "Error"),
("fail", "Failed"),
)
FLOW_TASK_STATUSES = (
("initializing", "Initializing"),
("running", "Running"),
("complete", "Completed"),
("error", "Error"),
)
BUILD_TYPES = (
("manual", "Manual"),
("auto", "Auto"),
("scheduled", "Scheduled"),
("legacy", "Legacy - Probably Automatic"),
("manual-command", "Created from command line"),
)
RELEASE_REL_TYPES = (
("test", "Release Test"),
("automation", "Release Automation"),
("manual", "Manual Release Activity"),
)
FAIL_EXCEPTIONS = (
ApexTestException,
BrowserTestFailure,
MetadataComponentFailure,
RobotTestFailure,
)
jinja2_env = ImmutableSandboxedEnvironment()
class GnarlyEncoder(DjangoJSONEncoder):
""" A Very Gnarly Encoder that serializes a repr() if it can't get anything else.... """
def default(self, obj): # pylint: disable=W0221, E0202
try:
return super().default(obj)
except TypeError:
return repr(obj)
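# A small, self-contained sketch of how GnarlyEncoder degrades gracefully:
# anything DjangoJSONEncoder cannot serialize falls back to its repr(). The
# Opaque class below is purely illustrative.
def _gnarly_encoder_example():
    class Opaque:
        def __repr__(self):
            return "<Opaque thing>"

    # -> '{"ok": 1, "weird": "<Opaque thing>"}'
    return json.dumps({"ok": 1, "weird": Opaque()}, cls=GnarlyEncoder)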
class BuildQuerySet(models.QuerySet):
def for_user(self, user, perms=None):
if user.is_superuser:
return self
if perms is None:
perms = "plan.view_builds"
PlanRepository = apps.get_model("plan.PlanRepository")
return self.filter(planrepo__in=PlanRepository.objects.for_user(user, perms))
def get_for_user_or_404(self, user, query, perms=None):
try:
return self.for_user(user, perms).get(**query)
except Build.DoesNotExist:
raise Http404
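# Typical queryset usage, as a sketch (the request/user objects and the view
# context are assumptions; the permission handling comes from the methods above):
#
#     builds = Build.objects.for_user(request.user).filter(status="running")
#     build = Build.objects.get_for_user_or_404(request.user, {"id": build_id})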
class Build(models.Model):
repo = models.ForeignKey(
"repository.Repository", related_name="builds", on_delete=models.CASCADE
)
branch = models.ForeignKey(
"repository.Branch",
related_name="builds",
null=True,
blank=True,
on_delete=models.PROTECT,
)
commit = models.CharField(max_length=64)
commit_message = models.TextField(null=True, blank=True)
commit_status = models.CharField(
max_length=140,
null=True,
blank=True,
help_text="Optional success message to be reported as a github commit status",
)
tag = models.CharField(max_length=255, null=True, blank=True)
pr = models.IntegerField(null=True, blank=True)
plan = models.ForeignKey(
"plan.Plan", related_name="builds", on_delete=models.PROTECT
)
planrepo = models.ForeignKey(
"plan.PlanRepository",
related_name="builds",
on_delete=models.PROTECT,
null=True,
)
org = models.ForeignKey(
"cumulusci.Org",
related_name="builds",
null=True,
blank=True,
on_delete=models.PROTECT,
)
org_instance = models.ForeignKey(
"cumulusci.ScratchOrgInstance",
related_name="builds",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
schedule = models.ForeignKey(
"plan.PlanSchedule",
related_name="builds",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
log = models.TextField(null=True, blank=True)
exception = models.TextField(null=True, blank=True)
error_message = models.TextField(null=True, blank=True)
traceback = models.TextField(null=True, blank=True)
qa_comment = models.TextField(null=True, blank=True)
qa_user = models.ForeignKey(
"users.User",
related_name="builds_qa",
null=True,
blank=True,
on_delete=models.PROTECT,
)
status = models.CharField(max_length=16, choices=BUILD_STATUSES, default="queued")
keep_org = models.BooleanField(default=False)
task_id_status_start = models.CharField(max_length=64, null=True, blank=True)
task_id_check = models.CharField(max_length=64, null=True, blank=True)
task_id_run = models.CharField(max_length=64, null=True, blank=True)
task_id_status_end = models.CharField(max_length=64, null=True, blank=True)
current_rebuild = models.ForeignKey(
"build.Rebuild",
related_name="current_builds",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
time_queue = models.DateTimeField(auto_now_add=True)
time_start = models.DateTimeField(null=True, blank=True)
time_end = models.DateTimeField(null=True, blank=True)
time_qa_start = models.DateTimeField(null=True, blank=True)
time_qa_end = models.DateTimeField(null=True, blank=True)
build_type = models.CharField(max_length=16, choices=BUILD_TYPES, default="legacy")
user = models.ForeignKey(
"users.User", related_name="builds", null=True, on_delete=models.PROTECT
)
release_relationship_type = models.CharField(
max_length=50, choices=RELEASE_REL_TYPES, null=True, blank=True
)
release = models.ForeignKey(
"release.Release", on_delete=models.PROTECT, null=True, blank=True
)
org_note = models.CharField(max_length=255, default="", blank=True, null=True)
org_api_version = models.CharField(max_length=5, blank=True, null=True)
objects = BuildQuerySet.as_manager()
class Meta:
ordering = ["-time_queue"]
permissions = (("search_builds", "Search Builds"),)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._try_populate_planrepo()
def save(self, *args, **kwargs):
self._try_populate_planrepo()
super().save(*args, **kwargs)
def _try_populate_planrepo(self):
if self.plan_id and self.repo_id and not self.planrepo:
PlanRepository = apps.get_model("plan.PlanRepository")
matching_repo = PlanRepository.objects.filter(
plan=self.plan, repo=self.repo
)
if matching_repo.exists():
self.planrepo = matching_repo[0]
def __str__(self):
return f"{self.id}: {self.repo} - {self.commit}"
def get_log_html(self):
if self.log:
return format_log(self.log)
def get_absolute_url(self):
return reverse("build_detail", kwargs={"build_id": str(self.id)})
def get_external_url(self):
url = f"{settings.SITE_URL}{self.get_absolute_url()}"
return url
def get_build(self):
return self.current_rebuild if self.current_rebuild else self
def get_build_attr(self, attr):
# get an attribute from the most recent build/rebuild
build = self.get_build()
return getattr(build, attr)
def get_status(self):
return self.get_build_attr("status")
def get_exception(self):
return self.get_build_attr("exception")
def get_error_message(self):
return self.get_build_attr("error_message")
def get_qa_comment(self):
return self.get_build_attr("qa_comment")
def get_qa_user(self):
return self.get_build_attr("qa_user")
def get_time_queue(self):
return self.get_build_attr("time_queue")
def get_time_start(self):
return self.get_build_attr("time_start")
def get_time_end(self):
return self.get_build_attr("time_end")
def get_time_qa_start(self):
return self.get_build_attr("time_qa_start")
def get_time_qa_end(self):
return self.get_build_attr("time_qa_end")
def get_commit(self):
return f"{self.commit[:8]}"
def set_status(self, status):
build = self.get_build()
build.status = status
build.save()
def flush_log(self):
for handler in self.logger.handlers:
handler.stream.flush()
@property
def worker_id(self):
return os.environ.get("DYNO")
def run(self):
self.logger = init_logger(self)
worker_str = f"in {self.worker_id}" if self.worker_id else ""
self.logger.info(
f"-- Building commit {self.commit} {worker_str} with CumulusCI version {cumulusci_version}"
)
self.flush_log()
build = self.current_rebuild if self.current_rebuild else self
set_build_info(build, status="running", time_start=timezone.now())
if self.schedule:
self.logger.info(
f"Build triggered by {self.schedule.schedule} schedule #{self.schedule.id}"
)
try:
# Extract the repo to a temp build dir
self.build_dir = self.checkout()
self.root_dir = os.getcwd()
# Change directory to the build_dir
os.chdir(self.build_dir)
# Initialize the project config
project_config = self.get_project_config()
# Set the sentry context for build errors
sentry_environment = "metaci"
project_config.config["sentry_environment"] = sentry_environment
# Look up or spin up the org
org_config = self.get_org(project_config)
if (
self.org and self.org.name and self.org.name.lower() == "packaging"
): # Calling for any actions taken against packaging org
send_start_webhook(
project_config,
self.release,
self.plan.role,
self.org.configuration_item,
)
except Exception as e:
self.logger.error(str(e))
set_build_info(
build,
status="error",
time_end=timezone.now(),
error_message=str(e),
exception=e.__class__.__name__,
traceback="".join(traceback.format_tb(e.__traceback__)),
)
self.delete_build_dir()
self.flush_log()
return
try:
self.org_api_version = org_config.latest_api_version
except Exception as e:
self.logger.warning(f"Could not retrieve salesforce API version: {e}")
# Run flows
try:
flows = [flow.strip() for flow in self.plan.flows.split(",")]
for flow in flows:
self.logger = init_logger(self)
self.logger.info(f"Running flow: {flow}")
self.save()
build_flow = BuildFlow(
build=self, rebuild=self.current_rebuild, flow=flow
)
build_flow.save()
build_flow.run(project_config, org_config, self.root_dir)
if build_flow.status != "success":
self.logger = init_logger(self)
self.logger.error(
f"Build flow {flow} completed with status {build_flow.status}"
)
self.logger.error(
f" {build_flow.exception}: {build_flow.error_message}"
)
set_build_info(
build,
status=build_flow.status,
exception=build_flow.exception,
traceback=build_flow.traceback,
error_message=build_flow.error_message,
time_end=timezone.now(),
)
self.flush_log()
if org_config.created:
self.delete_org(org_config)
return
else:
self.logger = init_logger(self)
self.logger.info(f"Build flow {flow} completed successfully")
self.flush_log()
self.save()
except Exception as e:
set_build_info(
build,
exception=str(e),
traceback="".join(traceback.format_tb(e.__traceback__)),
status="error",
time_end=timezone.now(),
)
if org_config.created:
self.delete_org(org_config)
self.logger = init_logger(self)
self.logger.error(str(e))
self.delete_build_dir()
self.flush_log()
return
if self.plan.role == "qa":
self.logger.info("Build complete, org is now ready for QA testing")
elif org_config.created:
self.delete_org(org_config)
self.delete_build_dir()
self.flush_log()
if self.plan.role == "release":
try:
send_release_webhook(
project_config, self.release, self.org.configuration_item
)
except Exception as err:
message = f"Error while sending release webhook: {err}"
self.logger.error(message)
set_build_info(
build, status="error", exception=message, time_end=timezone.now()
)
return
if (
self.org and self.org.name and self.org.name.lower() == "packaging"
): # Calling for any actions taken against packaging org
try:
send_stop_webhook(
project_config,
self.release,
self.plan.role,
self.org.configuration_item,
)
except Exception as err:
message = f"Error while sending implementation stop step webhook: {err}"
self.logger.error(message)
set_build_info(
build, status="error", exception=message, time_end=timezone.now()
)
return
if self.plan.role == "qa":
set_build_info(
build,
status="qa",
time_end=timezone.now(),
time_qa_start=timezone.now(),
)
else:
set_build_info(build, status="success", time_end=timezone.now())
def checkout(self):
# get the ref
zip_content = BytesIO()
gh = self.repo.get_github_api()
gh.archive("zipball", zip_content, ref=self.commit)
build_dir = tempfile.mkdtemp()
self.logger.info(f"-- Extracting zip to temp dir {build_dir}")
self.save()
zip_file = zipfile.ZipFile(zip_content)
zip_file.extractall(build_dir)
# assume the zipfile has a single child dir with the repo
build_dir = os.path.join(build_dir, os.listdir(build_dir)[0])
self.logger.info(f"-- Commit extracted to build dir: {build_dir}")
self.save()
if self.plan.sfdx_config:
self.logger.info("-- Injecting custom sfdx-workspace.json from plan")
filename = os.path.join(build_dir, "sfdx-workspace.json")
with open(filename, "w") as f:
f.write(self.plan.sfdx_config)
return build_dir
def get_project_config(self):
universal_config = MetaCIUniversalConfig()
project_config = universal_config.get_project_config(self)
keychain = MetaCIProjectKeychain(project_config, None, self)
project_config.set_keychain(keychain)
return project_config
def get_org(self, project_config, retries=3):
self.logger = init_logger(self)
attempt = 1
if self.org:
# If the build's org was already set, keep using it
org_name = self.org.name
else:
org_name = self.plan.org
while True:
try:
org_config = project_config.keychain.get_org(org_name)
break
except ScratchOrgException as e:
if (
str(e).startswith(FAILED_TO_CREATE_SCRATCH_ORG)
and attempt <= retries
):
self.logger.warning(str(e))
self.logger.info(
"Retrying create scratch org "
+ f"(retry {attempt} of {retries})"
)
attempt += 1
continue
else:
raise e
self.org = org_config.org
if self.current_rebuild:
self.current_rebuild.org_instance = org_config.org_instance
self.current_rebuild.save()
else:
self.org_instance = org_config.org_instance
self.save()
return org_config
def get_org_instance(self):
if self.current_rebuild and self.current_rebuild.org_instance:
return self.current_rebuild.org_instance
else:
return self.org_instance
def get_org_attr(self, attr):
org_instance = self.get_org_instance()
obj = getattr(org_instance, attr, "")
return obj() if callable(obj) else obj
def get_org_deleted(self):
return self.get_org_attr("deleted")
def get_org_sf_org_id(self):
return self.get_org_attr("sf_org_id")
def get_org_name(self):
return self.get_org_attr("__str__")
def get_org_time_deleted(self):
return self.get_org_attr("time_deleted")
def get_org_url(self):
return self.get_org_attr("get_absolute_url")
def get_org_username(self):
return self.get_org_attr("username")
def delete_org(self, org_config):
self.logger = init_logger(self)
if not org_config.scratch:
return
if self.keep_org:
self.logger.info(
"Skipping scratch org deletion since keep_org was requested"
)
return
if self.status == "error" and self.plan.keep_org_on_error:
self.logger.info(
"Skipping scratch org deletion since keep_org_on_error is enabled"
)
return
if self.status == "fail" and self.plan.keep_org_on_fail:
self.logger.info(
"Skipping scratch org deletion since keep_org_on_fail is enabled"
)
return
try:
org_instance = self.get_org_instance()
org_instance.delete_org(org_config)
except Exception as e:
self.logger.error(str(e))
self.save()
def delete_build_dir(self):
if hasattr(self, "build_dir"):
self.logger.info(f"Deleting build dir {self.build_dir}")
shutil.rmtree(self.build_dir)
self.save()
class BuildFlow(models.Model):
build = models.ForeignKey(
"build.Build", related_name="flows", on_delete=models.CASCADE
)
rebuild = models.ForeignKey(
"build.Rebuild",
related_name="flows",
null=True,
blank=True,
on_delete=models.CASCADE,
)
status = models.CharField(
max_length=16, choices=BUILD_FLOW_STATUSES, default="queued"
)
flow = models.CharField(max_length=255, null=True, blank=True)
log = models.TextField(null=True, blank=True)
exception = models.TextField(null=True, blank=True)
traceback = models.TextField(null=True, blank=True)
error_message = models.TextField(null=True, blank=True)
time_queue = models.DateTimeField(auto_now_add=True)
time_start = models.DateTimeField(null=True, blank=True)
time_end = models.DateTimeField(null=True, blank=True)
tests_total = models.IntegerField(null=True, blank=True)
tests_pass = models.IntegerField(null=True, blank=True)
tests_fail = models.IntegerField(null=True, blank=True)
asset_hash = models.CharField(max_length=64, unique=True, default=generate_hash)
def __str__(self):
return f"{self.build.id}: {self.build.repo} - {self.build.commit} - {self.flow}"
def get_absolute_url(self):
return (
reverse("build_detail", kwargs={"build_id": str(self.build.id)})
+ f"#flow-{self.flow}"
)
def get_log_html(self):
if self.log:
return format_log(self.log)
def run(self, project_config, org_config, root_dir):
self.root_dir = root_dir
# Record the start
set_build_info(self, status="running", time_start=timezone.now())
# Update github status
if settings.GITHUB_STATUS_UPDATES_ENABLED:
set_github_status.delay(self.build_id)
# Set up logger
self.logger = init_logger(self)
try:
# Run the flow
self.run_flow(project_config, org_config)
# Determine build commit status
self.set_commit_status()
# Load test results
self.load_test_results()
# Record result
exception = None
status = "success"
except FAIL_EXCEPTIONS as e:
self.logger.error(traceback.format_exc())
exception = e
self.load_test_results()
status = "fail"
except Exception as e:
self.logger.error(traceback.format_exc())
exception = e
status = "error"
kwargs = {"status": status, "time_end": timezone.now()}
if exception:
kwargs["error_message"] = str(exception)
kwargs["exception"] = exception.__class__.__name__
kwargs["traceback"] = "".join(traceback.format_tb(exception.__traceback__))
set_build_info(self, **kwargs)
def run_flow(self, project_config, org_config):
# Add the repo root to syspath to allow for custom tasks and flows in
# the repo
sys.path.append(project_config.repo_root)
flow_config = project_config.get_flow(self.flow)
# If it's a release build, pass the dates in
options = self._get_flow_options()
callbacks = None
if settings.METACI_FLOW_CALLBACK_ENABLED:
from metaci.build.flows import MetaCIFlowCallback
callbacks = MetaCIFlowCallback(buildflow_id=self.pk)
# Create the flow and handle initialization exceptions
self.flow_instance = FlowCoordinator(
project_config,
flow_config,
name=self.flow,
options=options,
callbacks=callbacks,
)
# Run the flow
return self.flow_instance.run(org_config)
def _get_flow_options(self) -> dict:
options = {}
if self.build.plan.role == "release" and self.build.release:
options["github_release_notes"] = {
"sandbox_date": self.build.release.sandbox_push_date,
"production_date": self.build.release.production_push_date,
}
if (
self.build.plan.role == "push_sandbox" and self.build.release
): # override lives in MetaCI
options["push_sandbox"] = {
"version": f"{self.build.release.version_number}",
}
if (
self.build.plan.role == "push_production" and self.build.release
): # override lives in MetaCI
options["push_all"] = {
"version": f"{self.build.release.version_number}",
}
return options
def set_commit_status(self):
if self.build.plan.commit_status_template:
template = jinja2_env.from_string(self.build.plan.commit_status_template)
message = template.render(results=self.flow_instance.results)
self.build.commit_status = message
self.build.save()
def record_result(self):
self.status = "success"
self.time_end = timezone.now()
self.save()
def load_test_results(self):
"""Import results from JUnit or test_results.json.
Robot Framework results are imported in MetaCIFlowCallback.post_task
"""
# Load JUnit
results = []
if self.build.plan.junit_path:
for filename in iglob(self.build.plan.junit_path):
results.extend(self.load_junit(filename))
if not results:
self.logger.warning(
f"No results found at JUnit path {self.build.plan.junit_path}"
)
if results:
import_test_results(self, results, "JUnit")
# Load from test_results.json
results = []
try:
results_filename = "test_results.json"
with open(results_filename, "r") as f:
results.extend(json.load(f))
for result in results:
result["SourceFile"] = results_filename
except IOError:
try:
results_filename = "test_results.xml"
results.extend(self.load_junit(results_filename))
except IOError:
pass
if results:
import_test_results(self, results, "Apex")
self.tests_total = self.test_results.count()
self.tests_pass = self.test_results.filter(outcome="Pass").count()
self.tests_fail = self.test_results.filter(
outcome__in=["Fail", "CompileFail"]
).count()
self.save()
def load_junit(self, filename):
results = []
tree = elementtree_parse_file(filename)
testsuite = tree.getroot()
for testcase in testsuite.iter("testcase"):
result = {
"ClassName": testcase.attrib["classname"],
"Method": testcase.attrib["name"],
"Outcome": "Pass",
"StackTrace": "",
"Message": "",
"Stats": {"duration": testcase.get("time")},
"SourceFile": filename,
}
for element in testcase.iter():
if element.tag not in ["failure", "error"]:
continue
result["Outcome"] = "Fail"
if element.text:
result["StackTrace"] += element.text + "\n"
message = element.get("type", "")
if element.get("message"):
message += ": " + element.get("message", "")
result["Message"] += message + "\n"
results.append(result)
return results
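# Shape of the JUnit XML that load_junit() consumes, as a sketch (tag and
# attribute names are taken from the parsing code above; the concrete values
# are illustrative):
#
#   <testsuite>
#     <testcase classname="SampleTest" name="test_ok" time="0.12"/>
#     <testcase classname="SampleTest" name="test_bad" time="0.50">
#       <failure type="AssertionError" message="boom">stack trace ...</failure>
#     </testcase>
#   </testsuite>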
def asset_upload_to(instance, filename):
folder = instance.build_flow.asset_hash
return os.path.join(folder, filename)
class BuildFlowAsset(models.Model):
build_flow = models.ForeignKey(
BuildFlow, related_name="assets", on_delete=models.CASCADE
)
asset = models.FileField(upload_to=asset_upload_to)
category = models.CharField(max_length=1024)
class Rebuild(models.Model):
build = models.ForeignKey(
"build.Build", related_name="rebuilds", on_delete=models.CASCADE
)
user = models.ForeignKey(
"users.User", related_name="rebuilds", on_delete=models.PROTECT
)
org_instance = models.ForeignKey(
"cumulusci.ScratchOrgInstance",
related_name="rebuilds",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
status = models.CharField(max_length=16, choices=BUILD_STATUSES, default="queued")
exception = models.TextField(null=True, blank=True)
error_message = models.TextField(null=True, blank=True)
qa_comment = models.TextField(null=True, blank=True)
qa_user = models.ForeignKey(
"users.User", related_name="rebuilds_qa", null=True, on_delete=models.PROTECT
)
time_queue = models.DateTimeField(auto_now_add=True)
time_start = models.DateTimeField(null=True, blank=True)
time_end = models.DateTimeField(null=True, blank=True)
time_qa_start = models.DateTimeField(null=True, blank=True)
time_qa_end = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ["-id"]
def get_absolute_url(self):
return reverse(
"build_detail",
kwargs={"build_id": str(self.build.id), "rebuild_id": str(self.id)},
)
class FlowTaskManager(models.Manager):
# TODO: refactor to use step strings?
def find_task(self, build_flow_id, path, step_num):
try:
return self.get(build_flow_id=build_flow_id, path=path, stepnum=step_num)
except ObjectDoesNotExist:
return FlowTask(build_flow_id=build_flow_id, path=path, stepnum=step_num)
class FlowTask(models.Model):
""" A FlowTask holds the result of a task execution during a BuildFlow. """
time_start = models.DateTimeField(null=True, blank=True)
time_end = models.DateTimeField(null=True, blank=True)
# time_initialize = models.DateTimeField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
stepnum = models.CharField(
max_length=64, help_text="dotted step number for CCI task"
)
path = models.CharField(
max_length=2048, help_text="dotted path e.g. flow1.flow2.task_name"
)
class_path = models.TextField(null=True, blank=True)
options = JSONField(null=True, blank=True, encoder=GnarlyEncoder)
result = JSONField(null=True, blank=True, encoder=GnarlyEncoder)
return_values = JSONField(null=True, blank=True, encoder=GnarlyEncoder)
exception = models.CharField(max_length=255, null=True, blank=True)
exception_value = JSONField(null=True, blank=True, encoder=GnarlyEncoder)
status = models.CharField(
max_length=16, choices=FLOW_TASK_STATUSES, default="queued"
)
build_flow = models.ForeignKey(
"build.BuildFlow", related_name="tasks", on_delete=models.CASCADE
)
objects = FlowTaskManager()
def __str__(self):
return f"{self.build_flow_id}: {self.stepnum} - {self.path}"
class Meta:
ordering = ["-build_flow", "stepnum"]
verbose_name = "Flow Task"
verbose_name_plural = "Flow Tasks"
| 34.377607 | 103 | 0.608244 |
52febd080bb15150560db68fe4c7556fc7438a05 | 6,718 | py | Python | tools/train.py | Wellleee/mm | 99b43550fa67d4f68438cf7991d41842f13740f8 | [
"Apache-2.0"
] | null | null | null | tools/train.py | Wellleee/mm | 99b43550fa67d4f68438cf7991d41842f13740f8 | [
"Apache-2.0"
] | null | null | null | tools/train.py | Wellleee/mm | 99b43550fa67d4f68438cf7991d41842f13740f8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcls import __version__
from mmcls.apis import set_random_seed, train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
from mmcls.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a model')
    parser.add_argument('--config', default='configs/resnet/resnet18_flowers_bs128.py', help='train config file path, e.g. resnet18_flowers_bs128.py or resnet18_b16x8_cifar10.py')
parser.add_argument('--work-dir', default='output/resnet18_flowers_bs128', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--device', help='device used for training')
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
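# Example invocations, as a sketch (paths and override keys are placeholders
# and must exist in the chosen config):
#
#   python tools/train.py --config configs/resnet/resnet18_flowers_bs128.py \
#       --work-dir output/resnet18_flowers_bs128 --seed 0 --deterministic
#
#   python tools/train.py --config configs/resnet/resnet18_b16x8_cifar10.py \
#       --cfg-options optimizer.lr=0.01 data.samples_per_gpu=64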
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_classifier(cfg.model)
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmcls version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmcls_version=__version__,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
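        # Note: the training device is fixed to 'cpu' here; the --device flag
        # parsed above is not applied to this call.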
device='cpu',
meta=meta)
if __name__ == '__main__':
main()
| 36.912088 | 177 | 0.653915 |
80900d9f7049c572252ac1881332d84cd3c349e5 | 5,288 | py | Python | third_party/webrtc/src/tools/python_charts/data/vp8_hw.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 8 | 2016-02-08T11:59:31.000Z | 2020-05-31T15:19:54.000Z | third_party/webrtc/src/tools/python_charts/data/vp8_hw.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 1 | 2021-05-05T11:11:31.000Z | 2021-05-05T11:11:31.000Z | third_party/webrtc/src/tools/python_charts/data/vp8_hw.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 7 | 2016-02-09T09:28:14.000Z | 2020-07-25T19:03:36.000Z | # Sample output from the video_quality_measurment program, included only for
# reference. Geneate your own by running with the --python flag and then change
# the filenames in main.py
test_configuration = [{'name': 'name', 'value': 'VP8 hardware test'},
{'name': 'description', 'value': ''},
{'name': 'test_number', 'value': '0'},
{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
{'name': 'output_filename', 'value': 'foreman_cif_out.yuv'},
{'name': 'output_dir', 'value': '.'},
{'name': 'packet_size_in_bytes', 'value': '1500'},
{'name': 'max_payload_size_in_bytes', 'value': '1440'},
{'name': 'packet_loss_mode', 'value': 'Uniform'},
{'name': 'packet_loss_probability', 'value': '0.000000'},
{'name': 'packet_loss_burst_length', 'value': '1'},
{'name': 'exclude_frame_types', 'value': 'ExcludeOnlyFirstKeyFrame'},
{'name': 'frame_length_in_bytes', 'value': '152064'},
{'name': 'use_single_core', 'value': 'False'},
{'name': 'keyframe_interval;', 'value': '0'},
{'name': 'video_codec_type', 'value': 'VP8'},
{'name': 'width', 'value': '352'},
{'name': 'height', 'value': '288'},
{'name': 'bit_rate_in_kbps', 'value': '500'},
]
frame_data_types = {'frame_number': ('number', 'Frame number'),
'encoding_successful': ('boolean', 'Encoding successful?'),
'decoding_successful': ('boolean', 'Decoding successful?'),
'encode_time': ('number', 'Encode time (us)'),
'decode_time': ('number', 'Decode time (us)'),
'encode_return_code': ('number', 'Encode return code'),
'decode_return_code': ('number', 'Decode return code'),
'bit_rate': ('number', 'Bit rate (kbps)'),
'encoded_frame_length': ('number', 'Encoded frame length (bytes)'),
'frame_type': ('string', 'Frame type'),
'packets_dropped': ('number', 'Packets dropped'),
'total_packets': ('number', 'Total packets'),
'ssim': ('number', 'SSIM'),
'psnr': ('number', 'PSNR (dB)'),
}
frame_data = [{'frame_number': 0, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 94676, 'decode_time': 37942, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 1098, 'encoded_frame_length': 4579, 'frame_type': 'Other', 'packets_dropped': 0, 'total_packets': 4, 'ssim': 0.910364, 'psnr': 35.067258},
{'frame_number': 1, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 244007, 'decode_time': 39421, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 306, 'encoded_frame_length': 1277, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.911859, 'psnr': 35.115193},
{'frame_number': 2, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 240508, 'decode_time': 38918, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 330, 'encoded_frame_length': 1379, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.913597, 'psnr': 35.181604},
{'frame_number': 3, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 243449, 'decode_time': 39664, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 298, 'encoded_frame_length': 1242, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.912378, 'psnr': 35.164710},
{'frame_number': 4, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 248024, 'decode_time': 39115, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 332, 'encoded_frame_length': 1385, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.911471, 'psnr': 35.109488},
{'frame_number': 5, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 246910, 'decode_time': 39146, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 416, 'encoded_frame_length': 1734, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.915231, 'psnr': 35.392300},
{'frame_number': 6, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 242953, 'decode_time': 38827, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 279, 'encoded_frame_length': 1165, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.916130, 'psnr': 35.452889},
{'frame_number': 7, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 247343, 'decode_time': 41429, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 393, 'encoded_frame_length': 1639, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.919356, 'psnr': 35.647128},
{'frame_number': 8, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 249529, 'decode_time': 40329, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 487, 'encoded_frame_length': 2033, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.924705, 'psnr': 36.179837},
{'frame_number': 9, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 249408, 'decode_time': 41716, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 583, 'encoded_frame_length': 2433, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.928433, 'psnr': 36.589875},
]
| 103.686275 | 338 | 0.671142 |
147c9987217b3b62b794c33c46afca72e9090c62 | 15,591 | py | Python | examples/simultaneous_translation/stacl/model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | [
"Apache-2.0"
] | null | null | null | examples/simultaneous_translation/stacl/model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | [
"Apache-2.0"
] | null | null | null | examples/simultaneous_translation/stacl/model.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlenlp.transformers import WordEmbedding, PositionalEmbedding
class CrossEntropyCriterion(nn.Layer):
def __init__(self, label_smooth_eps, pad_idx=0):
super(CrossEntropyCriterion, self).__init__()
self.label_smooth_eps = label_smooth_eps
self.pad_idx = pad_idx
def forward(self, predict, label):
weights = paddle.cast(label != self.pad_idx,
dtype=paddle.get_default_dtype())
if self.label_smooth_eps:
label = F.label_smooth(label=F.one_hot(
x=label, num_classes=predict.shape[-1]),
epsilon=self.label_smooth_eps)
cost = F.cross_entropy(
input=predict,
label=label,
reduction='none',
soft_label=True if self.label_smooth_eps else False).squeeze()
weighted_cost = cost * weights
sum_cost = paddle.sum(weighted_cost)
token_num = paddle.sum(weights)
token_num.stop_gradient = True
avg_cost = sum_cost / token_num
return sum_cost, avg_cost, token_num
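# A small smoke-test sketch for the criterion; the batch size, target length and
# vocabulary size below are arbitrary, and the random tensors only illustrate
# the expected shapes ([batch, tgt_len, vocab] logits, [batch, tgt_len] labels).
def _criterion_example():
    criterion = CrossEntropyCriterion(label_smooth_eps=0.1, pad_idx=0)
    logits = paddle.rand([2, 5, 1000])
    labels = paddle.randint(1, 1000, [2, 5])
    sum_cost, avg_cost, token_num = criterion(logits, labels)
    return avg_cost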
class DecoderLayer(nn.TransformerDecoderLayer):
def __init__(self, *args, **kwargs):
super(DecoderLayer, self).__init__(*args, **kwargs)
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, cache=None):
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
if cache is None:
tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, None)
else:
tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask,
cache[0])
tgt = residual + self.dropout1(tgt)
if not self.normalize_before:
tgt = self.norm1(tgt)
residual = tgt
if self.normalize_before:
tgt = self.norm2(tgt)
if len(memory) == 1:
# Full sent
tgt = self.cross_attn(tgt, memory[0], memory[0], memory_mask, None)
else:
# Wait-k policy
cross_attn_outputs = []
for i in range(tgt.shape[1]):
q = tgt[:, i:i + 1, :]
if i >= len(memory):
e = memory[-1]
else:
e = memory[i]
cross_attn_outputs.append(
self.cross_attn(q, e, e, memory_mask[:, :,
i:i + 1, :e.shape[1]],
None))
tgt = paddle.concat(cross_attn_outputs, axis=1)
tgt = residual + self.dropout2(tgt)
if not self.normalize_before:
tgt = self.norm2(tgt)
residual = tgt
if self.normalize_before:
tgt = self.norm3(tgt)
tgt = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = residual + self.dropout3(tgt)
if not self.normalize_before:
tgt = self.norm3(tgt)
return tgt if cache is None else (tgt, (incremental_cache, ))
class Decoder(nn.TransformerDecoder):
"""
PaddlePaddle 2.1 casts memory_mask.dtype to memory.dtype, but in STACL,
type of memory is list, having no dtype attribute.
"""
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, cache=None):
output = tgt
new_caches = []
for i, mod in enumerate(self.layers):
if cache is None:
output = mod(output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
cache=None)
else:
output, new_cache = mod(output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
cache=cache[i])
new_caches.append(new_cache)
if self.norm is not None:
output = self.norm(output)
return output if cache is None else (output, new_caches)
class SimultaneousTransformer(nn.Layer):
"""
model
"""
def __init__(self,
src_vocab_size,
trg_vocab_size,
max_length,
n_layer,
n_head,
d_model,
d_inner_hid,
dropout,
weight_sharing,
bos_id=0,
eos_id=1,
waitk=-1):
super(SimultaneousTransformer, self).__init__()
self.trg_vocab_size = trg_vocab_size
self.emb_dim = d_model
self.bos_id = bos_id
self.eos_id = eos_id
self.dropout = dropout
self.waitk = waitk
self.n_layer = n_layer
self.n_head = n_head
self.d_model = d_model
self.src_word_embedding = WordEmbedding(vocab_size=src_vocab_size,
emb_dim=d_model,
bos_id=self.bos_id)
self.src_pos_embedding = PositionalEmbedding(emb_dim=d_model,
max_length=max_length)
if weight_sharing:
assert src_vocab_size == trg_vocab_size, (
"Vocabularies in source and target should be same for weight sharing."
)
self.trg_word_embedding = self.src_word_embedding
self.trg_pos_embedding = self.src_pos_embedding
else:
self.trg_word_embedding = WordEmbedding(vocab_size=trg_vocab_size,
emb_dim=d_model,
bos_id=self.bos_id)
self.trg_pos_embedding = PositionalEmbedding(emb_dim=d_model,
max_length=max_length)
encoder_layer = nn.TransformerEncoderLayer(d_model=d_model,
nhead=n_head,
dim_feedforward=d_inner_hid,
dropout=dropout,
activation='relu',
normalize_before=True,
bias_attr=[False, True])
encoder_norm = nn.LayerNorm(d_model)
self.encoder = nn.TransformerEncoder(encoder_layer=encoder_layer,
num_layers=n_layer,
norm=encoder_norm)
decoder_layer = DecoderLayer(d_model=d_model,
nhead=n_head,
dim_feedforward=d_inner_hid,
dropout=dropout,
activation='relu',
normalize_before=True,
bias_attr=[False, False, True])
decoder_norm = nn.LayerNorm(d_model)
self.decoder = Decoder(decoder_layer=decoder_layer,
num_layers=n_layer,
norm=decoder_norm)
if weight_sharing:
self.linear = lambda x: paddle.matmul(x=x,
y=self.trg_word_embedding.
word_embedding.weight,
transpose_y=True)
else:
self.linear = nn.Linear(in_features=d_model,
out_features=trg_vocab_size,
bias_attr=False)
def forward(self, src_word, trg_word):
src_max_len = paddle.shape(src_word)[-1]
trg_max_len = paddle.shape(trg_word)[-1]
base_attn_bias = paddle.cast(
src_word == self.bos_id,
dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e9
src_slf_attn_bias = base_attn_bias
src_slf_attn_bias.stop_gradient = True
trg_slf_attn_bias = paddle.tensor.triu((paddle.ones(
(trg_max_len, trg_max_len), dtype=paddle.get_default_dtype()) *
-np.inf), 1)
trg_slf_attn_bias.stop_gradient = True
trg_src_attn_bias = paddle.tile(base_attn_bias, [1, 1, trg_max_len, 1])
src_pos = paddle.cast(src_word != self.bos_id,
dtype="int64") * paddle.arange(start=0,
end=src_max_len)
trg_pos = paddle.cast(trg_word != self.bos_id,
dtype="int64") * paddle.arange(start=0,
end=trg_max_len)
src_emb = self.src_word_embedding(src_word)
src_pos_emb = self.src_pos_embedding(src_pos)
src_emb = src_emb + src_pos_emb
enc_input = F.dropout(
src_emb, p=self.dropout,
training=self.training) if self.dropout else src_emb
with paddle.static.amp.fp16_guard():
if self.waitk >= src_max_len or self.waitk == -1:
# Full sentence
enc_outputs = [
self.encoder(enc_input, src_mask=src_slf_attn_bias)
]
else:
# Wait-k policy
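                # Pre-compute one encoder output per visible source prefix
                # (prefix lengths waitk, waitk + 1, ..., src_max_len); the
                # decoder then attends to the prefix that matches each
                # target step.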
enc_outputs = []
for i in range(self.waitk, src_max_len + 1):
enc_output = self.encoder(
enc_input[:, :i, :],
src_mask=src_slf_attn_bias[:, :, :, :i])
enc_outputs.append(enc_output)
trg_emb = self.trg_word_embedding(trg_word)
trg_pos_emb = self.trg_pos_embedding(trg_pos)
trg_emb = trg_emb + trg_pos_emb
dec_input = F.dropout(
trg_emb, p=self.dropout,
training=self.training) if self.dropout else trg_emb
dec_output = self.decoder(dec_input,
enc_outputs,
tgt_mask=trg_slf_attn_bias,
memory_mask=trg_src_attn_bias)
predict = self.linear(dec_output)
return predict
def beam_search(self, src_word, beam_size=4, max_len=256, waitk=-1):
# TODO: "Speculative Beam Search for Simultaneous Translation"
raise NotImplementedError
def greedy_search(self, src_word, max_len=256, waitk=-1):
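        # Wait-k greedy decoding: the encoder is run on source prefixes of
        # length waitk, waitk + 1, ..., and decoding step i attends to
        # enc_outputs[i] (a prefix of length waitk + i), falling back to the
        # fully encoded source once i runs past the available prefixes.
        # A negative waitk decodes against the full source from the start.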
src_max_len = paddle.shape(src_word)[-1]
base_attn_bias = paddle.cast(
src_word == self.bos_id,
dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e9
src_slf_attn_bias = base_attn_bias
src_slf_attn_bias.stop_gradient = True
trg_src_attn_bias = paddle.tile(base_attn_bias, [1, 1, 1, 1])
src_pos = paddle.cast(src_word != self.bos_id,
dtype="int64") * paddle.arange(start=0,
end=src_max_len)
src_emb = self.src_word_embedding(src_word)
src_pos_emb = self.src_pos_embedding(src_pos)
src_emb = src_emb + src_pos_emb
enc_input = F.dropout(
src_emb, p=self.dropout,
training=self.training) if self.dropout else src_emb
if waitk < 0 or waitk > src_max_len:
enc_outputs = [self.encoder(enc_input, src_mask=src_slf_attn_bias)]
else:
enc_outputs = []
for i in range(waitk, src_max_len + 1):
enc_output = self.encoder(
enc_input[:, :i, :],
src_mask=src_slf_attn_bias[:, :, :, :i])
enc_outputs.append(enc_output)
# constant number
batch_size = enc_outputs[-1].shape[0]
max_len = (enc_outputs[-1].shape[1] +
20) if max_len is None else max_len
end_token_tensor = paddle.full(shape=[batch_size, 1],
fill_value=self.eos_id,
dtype="int64")
predict_ids = []
log_probs = paddle.full(shape=[batch_size, 1],
fill_value=0,
dtype="float32")
trg_word = paddle.full(shape=[batch_size, 1],
fill_value=self.bos_id,
dtype="int64")
# init states (caches) for transformer
caches = self.decoder.gen_cache(enc_outputs[-1], do_zip=False)
for i in range(max_len):
trg_pos = paddle.full(shape=trg_word.shape,
fill_value=i,
dtype="int64")
trg_emb = self.trg_word_embedding(trg_word)
trg_pos_emb = self.trg_pos_embedding(trg_pos)
trg_emb = trg_emb + trg_pos_emb
dec_input = F.dropout(
trg_emb, p=self.dropout,
training=self.training) if self.dropout else trg_emb
if waitk < 0 or i >= len(enc_outputs):
# Avoid getting the whole source in advance, a diff from:
# https://github.com/autosimtrans/SimulTransBaseline/blob/master/model.py#L1207
# if the decoder step is full sent or longer than all source
# step, then read the whole src
_e = enc_outputs[-1]
dec_output, caches = self.decoder(
dec_input, [_e], None,
trg_src_attn_bias[:, :, :, :_e.shape[1]], caches)
else:
_e = enc_outputs[i]
dec_output, caches = self.decoder(
dec_input, [_e], None,
trg_src_attn_bias[:, :, :, :_e.shape[1]], caches)
dec_output = paddle.reshape(dec_output,
shape=[-1, dec_output.shape[-1]])
logits = self.linear(dec_output)
step_log_probs = paddle.log(F.softmax(logits, axis=-1))
log_probs = paddle.add(x=step_log_probs, y=log_probs)
scores = log_probs
topk_scores, topk_indices = paddle.topk(x=scores, k=1)
finished = paddle.equal(topk_indices, end_token_tensor)
trg_word = topk_indices
log_probs = topk_scores
predict_ids.append(topk_indices)
if paddle.all(finished).numpy():
break
predict_ids = paddle.stack(predict_ids, axis=0)
finished_seq = paddle.transpose(predict_ids, [1, 2, 0])
finished_scores = topk_scores
return finished_seq, finished_scores
| 42.137838 | 95 | 0.519402 |
2ad3af7f911a821dcb642aecd5529fdc0942f7f4 | 7,177 | py | Python | easy_rec/python/compat/embedding_ops.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 285 | 2021-10-11T03:39:43.000Z | 2022-03-31T09:12:33.000Z | easy_rec/python/compat/embedding_ops.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 84 | 2021-10-15T03:48:58.000Z | 2022-03-31T12:38:53.000Z | easy_rec/python/compat/embedding_ops.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 71 | 2021-10-15T03:33:44.000Z | 2022-03-31T08:37:11.000Z | # -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
"""Add embedding column for EmbeddingVariable which is only available on pai."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner='mean',
default_id=None,
name=None,
partition_strategy='div',
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
  Fixed so that it can be used with Pai EmbeddingVariables.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float `Tensor`s or values representing
partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
      are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if embedding_weights is None:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
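  # `embed_tensors` below is only used to build the name scope; the original
  # `embedding_weights` object (which may be a Pai EmbeddingVariable rather
  # than a list of partitioned tensors) is passed unchanged to
  # embedding_lookup_sparse.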
embed_tensors = [ops.convert_to_tensor(embedding_weights)]
with ops.name_scope(name, 'embedding_lookup',
embed_tensors + [sparse_ids, sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)
])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices,
sparse_weights.values,
sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
if combiner != 'sum':
sparse_ids, sparse_weights = _prune_invalid_weights(
sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
sparse_ids, default_id or 0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
indices = sparse_ids.indices
values = sparse_ids.values
if values.dtype != dtypes.int64:
values = math_ops.to_int64(values)
sparse_ids = sparse_tensor.SparseTensor(
indices=indices, values=values, dense_shape=sparse_ids.dense_shape)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(
is_row_empty, array_ops.zeros_like(result), result, name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(
tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
| 44.030675 | 80 | 0.689285 |
ee7a84e7769d07c04d87a0523daf2565e0f36f15 | 226 | py | Python | douglas/__init__.py | willkg/douglas | 7e46919d0baefecba414f41980cbe9c0529a884e | [
"MIT"
] | 1 | 2016-02-12T15:26:24.000Z | 2016-02-12T15:26:24.000Z | douglas/__init__.py | willkg/douglas | 7e46919d0baefecba414f41980cbe9c0529a884e | [
"MIT"
] | 1 | 2015-04-20T13:33:39.000Z | 2015-04-20T13:33:39.000Z | douglas/__init__.py | willkg/douglas | 7e46919d0baefecba414f41980cbe9c0529a884e | [
"MIT"
] | null | null | null | # valid version formats:
# * x.y - final release
# * x.ya1 - alpha 1
# * x.yb1 - beta 1
# * x.yrc1 - release candidate 1
# * x.y.dev - dev
# see http://www.python.org/dev/peps/pep-0386/
__version__ = "0.1.dev"
| 20.545455 | 46 | 0.579646 |
4f37487d3d7bb2ca87736b18ab51705938bde3e4 | 1,335 | py | Python | HW3/mnistLoader.py | nick-terry/interactive-learning | aa247a68aa7562f638629b680bc4a0b485bfd759 | [
"MIT"
] | null | null | null | HW3/mnistLoader.py | nick-terry/interactive-learning | aa247a68aa7562f638629b680bc4a0b485bfd759 | [
"MIT"
] | null | null | null | HW3/mnistLoader.py | nick-terry/interactive-learning | aa247a68aa7562f638629b680bc4a0b485bfd759 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 11:32:03 2021
@author: nick
"""
import numpy as np
from mnist import MNIST
def loadMNIST():
mndata = MNIST('./data/')
X_train, y_train = map(np.array, mndata.load_training())
X_test, y_test = map(np.array, mndata.load_testing())
X_train = X_train/255.0
X_test = X_test/255.0
return X_train,y_train,X_test,y_test
def getEig(X):
    # compute mean of training set
    mu = np.mean(X,axis=0)
    # compute sample covar matrix
    diff = X - mu
    sigma = diff.T @ diff / X.shape[0]
    # the covariance matrix is symmetric, so use eigh and sort the eigenpairs
    # by decreasing eigenvalue so that V[:,:d] selects the top-d components
    Lambda, V = np.linalg.eigh(sigma)
    order = np.argsort(Lambda)[::-1]
    return Lambda[order], V[:,order]
def project(X,basis):
# project X onto basis w/ inner products
proj = X @ basis
# compute reconstruction as linear comb of eigenvectors
# reconstr = proj @ basis.T
return proj
def getRepresentation(X,d,V=None,retV=False):
# Get a d-dimensional representation of X using PCA
if V is None:
Lambda, V = getEig(X)
basis = np.real(V[:,:d])
representation = project(X,basis)
if retV:
return representation,V
else:
return representation
def rescale(X):
# Rescale the rows of X to have unit L2 norm
return X/np.linalg.norm(X,ord=2,axis=1)[:,None]
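# --- Illustrative usage (added example, not part of the original module) ---
# A minimal sketch on synthetic data, since loadMNIST() needs the MNIST files
# on disk; the shapes and d below are assumptions for demonstration only.
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.rand(100, 50)
    rep, V = getRepresentation(X, d=10, retV=True)
    rep_unit = rescale(rep)
    print(rep.shape)                             # (100, 10)
    print(np.linalg.norm(rep_unit, axis=1)[:3])  # ~[1. 1. 1.]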
| 20.859375 | 60 | 0.605993 |
8128c89cdf51da8fca39db95a95c7059979952bc | 2,256 | py | Python | SCRIPTS/naive_bayes.py | kennedyCzar/SEMI-SUPERVISED-NAIVE-BAYES-FOR-TEXT-CLASSIFICATION | 599930ace3d3a5432ef5687c0eec6ccb2f35b918 | [
"MIT"
] | null | null | null | SCRIPTS/naive_bayes.py | kennedyCzar/SEMI-SUPERVISED-NAIVE-BAYES-FOR-TEXT-CLASSIFICATION | 599930ace3d3a5432ef5687c0eec6ccb2f35b918 | [
"MIT"
] | null | null | null | SCRIPTS/naive_bayes.py | kennedyCzar/SEMI-SUPERVISED-NAIVE-BAYES-FOR-TEXT-CLASSIFICATION | 599930ace3d3a5432ef5687c0eec6ccb2f35b918 | [
"MIT"
] | 2 | 2019-08-12T08:08:25.000Z | 2019-12-23T17:06:07.000Z | import csv
import random
import nltk
def get_labeled_reviews(path_to_csv):
labeled_reviews = []
with open(path_to_csv, newline='', encoding='utf-8') as csvfile:
review_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
next(review_reader, None) # Skip csv headers
for row in review_reader:
label = int(row[0])
review_text = row[2]
review = (review_text, label)
labeled_reviews.append(review)
return labeled_reviews
def review_features(review, all_words):
#features = {}
features = all_words.copy()
#features["review"] = review
for word in str.split(review, " "):
if len(word) > 1:
if word in features:
features[word] += 1
else:
features[word] = 1
return features
def cross_validation(all_data, n_sets):
set_size = 1.0 / n_sets
shuffled_data = all_data.copy()
random.shuffle(shuffled_data)
cumulative_percent = 0
    for i in range(n_sets):
        fold_size = int(set_size * len(all_data))
        split_start = i * fold_size
        split_end = (i + 1) * fold_size
print("train split_start: " + str(split_start) + " - split_end: " + str(split_end))
train_data_before = shuffled_data[:split_start]
train_data_after = shuffled_data[split_end:]
train_data = train_data_before + train_data_after
test_data = shuffled_data[split_start:split_end]
print('{}\n{}\n{}'.format(train_data_before, train_data_after, train_data))
# print("train size: " + str(len(train_data)) + " - test size: " + str(len(test_data)))
classifier = nltk.NaiveBayesClassifier.train(train_data, nltk.LaplaceProbDist)
correct = 0
        for t, l in test_data:
classified = classifier.classify(t)
# actual = labeled_reviews[split_point:][i][1]
if classified == l:
correct += 1
print(str(correct) + "/" + str(len(test_data)))
correct_percent = correct/len(test_data)
cumulative_percent += correct_percent
print(str(correct_percent) + "%")
print("Average result: " + str(cumulative_percent / n_sets) + "%")
| 33.176471 | 95 | 0.615248 |
6e95fbbd721634fb87fd317250f9f2302f3bc1da | 3,730 | py | Python | wpa_project/student_app/views/search_view.py | s-amundson/wpa_2p1 | 43deb859123e5ef2eab3652e403c8d2f53d43b77 | [
"MIT"
] | 1 | 2022-01-03T02:46:34.000Z | 2022-01-03T02:46:34.000Z | wpa_project/student_app/views/search_view.py | s-amundson/wpa_2p1 | 43deb859123e5ef2eab3652e403c8d2f53d43b77 | [
"MIT"
] | 31 | 2021-12-29T17:43:06.000Z | 2022-03-25T01:03:17.000Z | wpa_project/student_app/views/search_view.py | s-amundson/wpa_2p1 | 43deb859123e5ef2eab3652e403c8d2f53d43b77 | [
"MIT"
] | null | null | null | from allauth.account.models import EmailAddress
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import HttpResponseForbidden
from django.shortcuts import render
from django.views.generic.base import View
import logging
from ..forms import SearchEmailForm, SearchNameForm, SearchPhoneForm
from ..models import StudentFamily, Student
logger = logging.getLogger(__name__)
class SearchResultView(UserPassesTestMixin, View):
def get(self, request, student_family):
s = StudentFamily.objects.filter(pk=student_family)
return render(request, 'student_app/search_result.html', {'student_family': s})
def test_func(self):
return self.request.user.is_board
class SearchView(LoginRequiredMixin, View):
def get(self, request):
if not (request.user.is_board or request.user.is_staff):
return HttpResponseForbidden()
email_form = SearchEmailForm()
name_form = SearchNameForm()
phone_form = SearchPhoneForm()
return render(request, 'student_app/student_search.html',
{'email_form': email_form, 'name_form': name_form, 'phone_form': phone_form})
def post(self, request):
if not (request.user.is_board or request.user.is_staff):
return HttpResponseForbidden()
if 'email' in request.POST:
form = SearchEmailForm(request.POST)
if form.is_valid():
user = EmailAddress.objects.filter(email__iexact=form.cleaned_data['email'])
if len(user) == 0:
return render(request, 'student_app/message.html', {'message': 'No email found'})
student_family = []
for u in user:
# student_family.append(StudentFamily.objects.get(user__id=u.user_id))
try:
student = Student.objects.get(user__id=u.user_id)
except Student.DoesNotExist:
return render(request, 'student_app/message.html', {'message': 'No student found'})
                logger.debug(student)
student_family.append(student.student_family)
return render(request, 'student_app/search_result.html', {'student_family': student_family})
elif 'first_name' in request.POST:
form = SearchNameForm(request.POST)
if form.is_valid():
student = Student.objects.filter(first_name__iexact=form.cleaned_data['first_name'],
last_name__iexact=form.cleaned_data['last_name'])
if len(student) == 0:
return render(request, 'student_app/message.html', {'message': 'No student found'})
student_family = []
for s in student:
student_family.append(s.student_family)
return render(request, 'student_app/search_result.html', {'student_family': student_family})
elif 'phone' in request.POST:
form = SearchPhoneForm(request.POST)
if form.is_valid():
s = StudentFamily.objects.filter(phone=form.cleaned_data['phone'])
if len(s) == 0:
return render(request, 'student_app/message.html', {'message': 'No student found'})
return render(request, 'student_app/search_result.html', {'student_family': s})
email_form = SearchEmailForm()
name_form = SearchNameForm()
phone_form = SearchPhoneForm()
return render(request, 'student_app/student_search.html',
{'email_form': email_form, 'name_form': name_form, 'phone_form': phone_form})
| 50.405405 | 108 | 0.629223 |
0e81130444c65ad91d3627608b1eece4c56fc96a | 6,192 | py | Python | poller/poller.py | broadcaststorm/maestro-poller-service | 39064050327f8df0a9d4e1bd2d9da1cb35a8a24a | [
"MIT"
] | null | null | null | poller/poller.py | broadcaststorm/maestro-poller-service | 39064050327f8df0a9d4e1bd2d9da1cb35a8a24a | [
"MIT"
] | null | null | null | poller/poller.py | broadcaststorm/maestro-poller-service | 39064050327f8df0a9d4e1bd2d9da1cb35a8a24a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Stop gap measure to poll WebEx Teams for messages
"""
from os import environ
from time import sleep
from webexteamssdk import WebexTeamsAPI
from webexteamssdk.exceptions import RateLimitWarning, RateLimitError
from webexteamssdk.generator_containers import GeneratorContainer
from webexteamssdk.models.immutable import Room, Message
import parser
import library
def send_webex_message(webex, room_id, text_to_send):
webex.messages.create(roomId=room_id, text=text_to_send)
def send_webex_responses(webex, room_id, msg_list):
for (parent_id, msg) in msg_list:
webex.messages.create(
roomId=room_id, parentId=parent_id, text=msg
)
pass
def get_latest_commands(webex, room_id, latest_message_id, max_messages):
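    """Return the newest seen message id together with a list of
    (message_id, text, sender_email) tuples for commands that arrived since
    the previous poll, ordered oldest to newest."""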
    # If I try to collapse msg_iter into a list, WebEx throws a rate limiting
# warning. So these first few chunks of code are to work around service
# challenges.
    msg_iter: GeneratorContainer = webex.messages.list(
room_id, mentionedPeople="me", max=int(max_messages)
)
# If we are just starting up, reset the latest command marker
if not latest_message_id:
# Stupid GeneratorContainer doesn't support indexing
for msg in msg_iter:
latest_message_id = msg.id
return latest_message_id, list()
# If there happen to be no messages, try again later
return None, list()
# Convert the Message list to Python native format
commands = {msg.id: (msg.text, msg.personEmail) for msg in msg_iter}
# Python 3.7+ guarantees key order so... get the most recent unread msgs
    msg_ids: list[int] = list(commands.keys())
try:
latest_idx = msg_ids.index(latest_message_id)
new_msg_ids = msg_ids[:latest_idx]
except ValueError:
new_msg_ids = msg_ids
if len(new_msg_ids) == 0:
return latest_message_id, list()
# The messages are sorted newest to oldest, so grab latest msg id
latest_message_id = new_msg_ids[0]
# Now, reverse the order to process commands in order
new_msg_ids.reverse()
# Build the commands to be parsed (id, command, email)
return_commands = [
(id, str(commands[id][0]), str(commands[id][1])) for id in new_msg_ids
]
return latest_message_id, return_commands
def get_webex_room_id(webex, room_title):
# Get roomID for the room
    room_list: list[Room] = webex.rooms.list()
# Search through the list to find all room IDs that match the title
all_room_ids = [
room.id
for room in room_list
if room.title == room_title
]
# We should only find one (application requiremes unique titles)
if len(all_room_ids) > 1:
raise Exception(
f'Duplicate rooms found for {room_title}',
list(room_list)
)
return all_room_ids[0] if len(all_room_ids) else 0
def poller_initialization():
# Polling interval?
interval = environ.get('WEBEX_TEAMS_POLLING_INTERVAL')
if not interval:
raise Exception('WEBEX_TEAMS_POLLING_INTERVAL env var is required.')
# Which room are we monitoring?
webex_room_title = environ.get('WEBEX_TEAMS_ROOM_TITLE')
if not webex_room_title:
raise Exception('WEBEX_TEAMS_ROOM_TITLE env var is required.')
# Make sure our secure token is loaded
if not environ.get('WEBEX_TEAMS_ACCESS_TOKEN'):
raise Exception('WEBEX_TEAMS_ACCESS_TOKEN env var is required.')
# Check for conductor service environment variables, else default
svc_proto = environ.get('CONDUCTOR_PROTO', 'http')
svc_host = environ.get('CONDUCTOR_HOST', 'localhost')
svc_port = environ.get('CONDUCTOR_PORT', '8000')
conductor = library.conductor_service(
proto=svc_proto, host=svc_host, port=svc_port
)
# Load up WebexTeams API instance
webex = WebexTeamsAPI()
# Does the room exist?
webex_room_id = get_webex_room_id(webex, webex_room_title)
if webex_room_id == 0:
raise Exception('Room "{webex_room_title}" not found.')
return webex, webex_room_title, webex_room_id, int(interval), conductor
if __name__ == '__main__':
# Get initial setup information
webex, webex_room_title, webex_room_id, interval, conductor = poller_initialization() # noqa
# Since we are restarting the application, announce to the space
webex.messages.create(
roomId=webex_room_id,
text=f'Service is restarting. Polling interval {interval}s.'
)
latest_message_id = None
poll_wait = interval
print('Starting the polling...')
# Start the polling...
while True:
# Grab the latest messages - list of (id, msg) pairs
try:
latest_message_id, command_message_list = get_latest_commands(
webex, webex_room_id, latest_message_id,
max_messages=25 if latest_message_id else 1,
)
# Okay, we didn't perturb Happy Fun Ball, resume normal polling.
poll_wait = interval
# A list of (id, response) pairs - id to be used for 'parentId'
response_message = parser.parse_command_list(
conductor, command_message_list
)
send_webex_responses(webex, webex_room_id, response_message)
print(response_message)
# Uh oh, Happy Fun Ball is perturbed.
except RateLimitWarning as rlw:
warning_msg = f'Rate Limit Warning: {rlw.retry_after}'
print(warning_msg)
send_webex_message(webex, webex_room_id, warning_msg)
poll_wait = interval + int(rlw.retry_after)
# Now, Happy Fun Ball is smoking. Run far away.
except RateLimitError as rle:
error_msg = f'Rate Limit Error: {rle.retry_after}'
print(error_msg, 'Sleeping for a while...')
sleep(rle.retry_after)
send_webex_message(webex, webex_room_id, error_msg)
# And back off some more, just to be kind
poll_wait = interval + int(rle.retry_after)
finally:
sleep(poll_wait)
| 32.418848 | 96 | 0.672158 |
ed06906976db9a6a9f9b1362fe409a5f52a8b110 | 1,523 | py | Python | problem_8.py | alfonsokim/project-euler | cdc5a271c22f3ad78681ac920f2d9be6e75cdbc5 | [
"Unlicense"
] | null | null | null | problem_8.py | alfonsokim/project-euler | cdc5a271c22f3ad78681ac920f2d9be6e75cdbc5 | [
"Unlicense"
] | null | null | null | problem_8.py | alfonsokim/project-euler | cdc5a271c22f3ad78681ac920f2d9be6e75cdbc5 | [
"Unlicense"
] | null | null | null |
from functools import reduce

STR_NUMBER = '''
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
'''.strip().replace('\n', '')
# ============================================================
def solve():
"""
"""
current_max = 0
    for i in range(len(STR_NUMBER) - 12):  # last 13-digit window starts at index len - 13
product = reduce(lambda x, y: int(x) * int(y), STR_NUMBER[i : i+13])
current_max = current_max if product <= current_max else product
return current_max
# ============================================================
if __name__ == '__main__':
"""
"""
print solve()
| 36.261905 | 76 | 0.794485 |
cacf10c782d166a43cd3f24ff5df4c977d922dfc | 16,407 | py | Python | google/cloud/gaming_v1/services/game_server_configs_service/transports/grpc.py | LaudateCorpus1/python-game-servers | 9e22e6dd4e2543d694e33eb1ec2c4f9a05d8b940 | [
"Apache-2.0"
] | 11 | 2020-07-13T22:22:10.000Z | 2022-03-17T07:21:36.000Z | google/cloud/gaming_v1/services/game_server_configs_service/transports/grpc.py | LaudateCorpus1/python-game-servers | 9e22e6dd4e2543d694e33eb1ec2c4f9a05d8b940 | [
"Apache-2.0"
] | 83 | 2020-03-23T20:31:56.000Z | 2022-03-07T16:24:02.000Z | google/cloud/gaming_v1/services/game_server_configs_service/transports/grpc.py | LaudateCorpus1/python-game-servers | 9e22e6dd4e2543d694e33eb1ec2c4f9a05d8b940 | [
"Apache-2.0"
] | 7 | 2020-03-20T20:55:03.000Z | 2022-01-29T08:11:07.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.gaming_v1.types import game_server_configs
from google.longrunning import operations_pb2 # type: ignore
from .base import GameServerConfigsServiceTransport, DEFAULT_CLIENT_INFO
class GameServerConfigsServiceGrpcTransport(GameServerConfigsServiceTransport):
"""gRPC backend transport for GameServerConfigsService.
The game server config configures the game servers in an
Agones fleet.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "gameservices.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "gameservices.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def list_game_server_configs(
self,
) -> Callable[
[game_server_configs.ListGameServerConfigsRequest],
game_server_configs.ListGameServerConfigsResponse,
]:
r"""Return a callable for the list game server configs method over gRPC.
Lists game server configs in a given project,
location, and game server deployment.
Returns:
Callable[[~.ListGameServerConfigsRequest],
~.ListGameServerConfigsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_game_server_configs" not in self._stubs:
self._stubs["list_game_server_configs"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.GameServerConfigsService/ListGameServerConfigs",
request_serializer=game_server_configs.ListGameServerConfigsRequest.serialize,
response_deserializer=game_server_configs.ListGameServerConfigsResponse.deserialize,
)
return self._stubs["list_game_server_configs"]
@property
def get_game_server_config(
self,
) -> Callable[
[game_server_configs.GetGameServerConfigRequest],
game_server_configs.GameServerConfig,
]:
r"""Return a callable for the get game server config method over gRPC.
Gets details of a single game server config.
Returns:
Callable[[~.GetGameServerConfigRequest],
~.GameServerConfig]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_game_server_config" not in self._stubs:
self._stubs["get_game_server_config"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.GameServerConfigsService/GetGameServerConfig",
request_serializer=game_server_configs.GetGameServerConfigRequest.serialize,
response_deserializer=game_server_configs.GameServerConfig.deserialize,
)
return self._stubs["get_game_server_config"]
@property
def create_game_server_config(
self,
) -> Callable[
[game_server_configs.CreateGameServerConfigRequest], operations_pb2.Operation
]:
r"""Return a callable for the create game server config method over gRPC.
Creates a new game server config in a given project,
location, and game server deployment. Game server
configs are immutable, and are not applied until
referenced in the game server deployment rollout
resource.
Returns:
Callable[[~.CreateGameServerConfigRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_game_server_config" not in self._stubs:
self._stubs["create_game_server_config"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.GameServerConfigsService/CreateGameServerConfig",
request_serializer=game_server_configs.CreateGameServerConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_game_server_config"]
@property
def delete_game_server_config(
self,
) -> Callable[
[game_server_configs.DeleteGameServerConfigRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete game server config method over gRPC.
Deletes a single game server config. The deletion
will fail if the game server config is referenced in a
game server deployment rollout.
Returns:
Callable[[~.DeleteGameServerConfigRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_game_server_config" not in self._stubs:
self._stubs["delete_game_server_config"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.GameServerConfigsService/DeleteGameServerConfig",
request_serializer=game_server_configs.DeleteGameServerConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_game_server_config"]
def close(self):
self.grpc_channel.close()
__all__ = ("GameServerConfigsServiceGrpcTransport",)
| 44.104839 | 100 | 0.647833 |
7f37d3a70bb484cfc921194dd4bfb5dd10a46ab1 | 11,942 | py | Python | sdks/python/client/argo_workflows/model/node_selector_term.py | roofurmston/argo-workflows | 79a95f223396ecab408d831781ab2d38d1fa6de0 | [
"Apache-2.0"
] | 7,643 | 2017-08-22T22:10:45.000Z | 2021-02-09T17:13:12.000Z | sdks/python/client/argo_workflows/model/node_selector_term.py | roofurmston/argo-workflows | 79a95f223396ecab408d831781ab2d38d1fa6de0 | [
"Apache-2.0"
] | 4,183 | 2017-08-22T22:45:29.000Z | 2021-02-09T17:41:29.000Z | sdks/python/client/argo_workflows/model/node_selector_term.py | isubasinghe/argo-workflows | 1a6e94f1d490e2265c977514d698a1ca70e14fe3 | [
"Apache-2.0"
] | 1,524 | 2017-08-22T22:10:53.000Z | 2021-02-09T16:26:16.000Z | """
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.node_selector_requirement import NodeSelectorRequirement
globals()['NodeSelectorRequirement'] = NodeSelectorRequirement
class NodeSelectorTerm(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'match_expressions': ([NodeSelectorRequirement],), # noqa: E501
'match_fields': ([NodeSelectorRequirement],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'match_expressions': 'matchExpressions', # noqa: E501
'match_fields': 'matchFields', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""NodeSelectorTerm - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
match_expressions ([NodeSelectorRequirement]): A list of node selector requirements by node's labels.. [optional] # noqa: E501
match_fields ([NodeSelectorRequirement]): A list of node selector requirements by node's fields.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NodeSelectorTerm - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
match_expressions ([NodeSelectorRequirement]): A list of node selector requirements by node's labels.. [optional] # noqa: E501
match_fields ([NodeSelectorRequirement]): A list of node selector requirements by node's fields.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.894737 | 139 | 0.582817 |
5ab1fea35e0ec899493904d7ca79c1d76c886202 | 1,723 | py | Python | managerie_test_app/settings.py | akx/django-managerie | 170255f2f26a821f213d762a4102d1ebc890e8ab | [
"MIT"
] | 2 | 2020-06-03T11:36:25.000Z | 2021-11-28T15:36:06.000Z | managerie_test_app/settings.py | akx/django-managerie | 170255f2f26a821f213d762a4102d1ebc890e8ab | [
"MIT"
] | null | null | null | managerie_test_app/settings.py | akx/django-managerie | 170255f2f26a821f213d762a4102d1ebc890e8ab | [
"MIT"
] | null | null | null | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'vpf9qs8wv6b8k(%6%=0)six1u4z6g@gb0l5(duj$hw_lg_45$l'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_managerie',
'managerie_test_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'managerie_test_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'managerie_test_app.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
| 26.921875 | 70 | 0.673244 |
73669953a6293a330b4e249df7ae5cfa9ec774ed | 8,325 | py | Python | bruno_api/bruno_api/api.py | kwzrd/bruno | b343d47b76f612e9c40d92b9fb563d3cf4cee4e3 | [
"MIT"
] | null | null | null | bruno_api/bruno_api/api.py | kwzrd/bruno | b343d47b76f612e9c40d92b9fb563d3cf4cee4e3 | [
"MIT"
] | null | null | null | bruno_api/bruno_api/api.py | kwzrd/bruno | b343d47b76f612e9c40d92b9fb563d3cf4cee4e3 | [
"MIT"
] | null | null | null | import asyncio
import logging
import typing as t
from datetime import datetime
from operator import itemgetter
from os import environ
import asyncpg
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import RedirectResponse, Response
from bruno_api.database import Database
from bruno_api.models import (
HTTPError, Offer, OfferHistory, OfferPricepoint, Product, ProductData, ProductId
)
REVISION = environ.get("REVISION", "Unknown revision")
log = logging.getLogger(__name__)
api = FastAPI(
title="Bruno",
description="Applifting exercise",
version=REVISION,
docs_url="/docs",
)
@api.on_event("startup")
async def api_startup() -> None:
"""
Initialise database connection.
On failure, retry multiple times. When the DB starts in parallel with the app (with Compose),
it may not yet be ready to take connections.
"""
log.info("Begin application startup")
n_attempts = 3
for attempt in range(1, n_attempts + 1):
try:
await Database.connect()
except ConnectionError:
log.info(f"Failed to connect to DB (attempt: {attempt}/{n_attempts})")
if attempt >= n_attempts:
raise
await asyncio.sleep(5) # Retry after 5 seconds.
else:
log.info("DB connection pool ready!")
break
@api.on_event("shutdown")
async def api_shutdown() -> None:
"""Close database connection."""
log.info("Begin application shutdown")
await Database.disconnect()
@api.middleware("http")
async def prepare_db_connection(request: Request, call_next: t.Callable) -> Response:
"""
Borrow a DB connection from the pool and attach it to `request`.
This middleware makes it possible for endpoints to conveniently access the database using
a connection from the pool.
"""
async with Database.connection_pool.acquire() as db_conn:
request.state.db_conn = db_conn
response = await call_next(request)
del request.state.db_conn # Connection is now released; remove the attribute.
return response
can_404 = {404: {"description": "Resource not found", "model": HTTPError}}
@api.get("/", status_code=301)
async def root() -> Response:
"""Redirect to documentation."""
return RedirectResponse("/docs", status_code=301)
@api.post("/products", response_model=ProductId, status_code=202)
async def create_product(request: Request, product: ProductData) -> asyncpg.Record:
"""
Create a new product.
Return the product's generated ID.
Note that the product's offers are not available immediately after the request is fulfilled.
"""
return await request.state.db_conn.fetchrow(
"INSERT INTO products (name, description) VALUES ($1, $2) RETURNING id;",
product.name,
product.description,
)
@api.get("/products/{product_id}", response_model=Product, responses=can_404)
async def read_product(request: Request, product_id: int) -> asyncpg.Record:
"""Get product by ID."""
record = await request.state.db_conn.fetchrow(
"SELECT * FROM products WHERE id = $1;",
product_id,
)
if not record:
raise HTTPException(status_code=404, detail="Product not found")
return record
@api.get("/products", response_model=list[Product])
async def read_all_products(request: Request) -> list[asyncpg.Record]:
"""Get all products."""
products = await request.state.db_conn.fetch("SELECT * FROM products;")
return sorted(products, key=itemgetter("id")) # Sort products by ID, i.e. insertion order.
@api.put("/products/{product_id}", response_class=Response, status_code=204, responses=can_404)
async def update_product(request: Request, new_product: ProductData, product_id: int) -> None:
"""Update an existing product by ID."""
updated = await request.state.db_conn.fetchrow(
"UPDATE products SET name = $2, description = $3 WHERE id = $1 RETURNING id;",
product_id,
new_product.name,
new_product.description,
)
if not updated:
raise HTTPException(status_code=404, detail="Product not found")
@api.delete("/products/{product_id}", response_class=Response, status_code=204, responses=can_404)
async def delete_product(request: Request, product_id: int) -> None:
"""Delete product by ID."""
deleted = await request.state.db_conn.fetchrow(
"DELETE FROM products WHERE id = $1 RETURNING id;",
product_id,
)
if not deleted:
raise HTTPException(status_code=404, detail="Product not found")
async def validate_product_is_registered(request: Request, product_id: int) -> None:
"""Raise 404 exception if `product_id` is not marked as registered in the database."""
product = await request.state.db_conn.fetchrow(
"SELECT reg_token_hash FROM products WHERE id = $1;",
product_id,
)
if not product:
raise HTTPException(status_code=404, detail="Product not found")
if not product["reg_token_hash"]:
raise HTTPException(status_code=404, detail="Offers not yet acquired; please try later")
@api.get("/products/{product_id}/offers", response_model=list[Offer], responses=can_404)
async def read_offers(request: Request, product_id: int) -> list[asyncpg.Record]:
"""
Get latest offers for a product by ID.
Offers are not available immediately after a new product is registered. This endpoint responds
with a 404 if called before the resources are ready.
    Returns offers that are at most 10 minutes old. Timestamps are given as POSIX time in UTC.
"""
await validate_product_is_registered(request, product_id)
latest_session: t.Optional[int] = await request.state.db_conn.fetchval(
"SELECT max(timestamp) FROM offers WHERE product_id = $1;",
product_id,
)
if latest_session is None:
return [] # Product seems registered, but we don't have offers.
current_timestamp = datetime.now().timestamp() # Current UTC.
diff = abs(latest_session - current_timestamp)
minute_1 = 60
if diff > 10 * minute_1: # If latest session more than 10 minutes ago.
return [] # We don't have reasonably fresh offers.
# All looks good ~ select offers from this batch.
select_query = (
"SELECT id, price, items_in_stock, timestamp, product_id FROM offers "
"WHERE timestamp = $1 AND product_id = $2;"
)
return await request.state.db_conn.fetch(select_query, latest_session, product_id)
def calculate_price_trend(price_history: list[OfferPricepoint]) -> float:
"""Calculate overall price trend as latest record divided by earliest."""
if not price_history:
return -1.
# In case there is only 1 pricepoint, these will be the same, and that's ok.
latest, earliest = price_history[-1].price, price_history[0].price
if earliest == 0: # Cannot divide by 0. :)
return -1.
return latest / earliest
@api.get(
"/products/{product_id}/offers/{offer_id}/history",
response_model=OfferHistory,
responses=can_404,
)
async def read_offer_history(request: Request, product_id: int, offer_id: int) -> OfferHistory:
"""
Read an offer's price history.
Return a chronologically ordered array of prices over the past 10 minutes. Each price point
is identified by a POSIX UTC timestamp.
An overall trend is computed as the quotient of the latest price over the earliest price
in the considered time period.
"""
await validate_product_is_registered(request, product_id)
offer_history = await request.state.db_conn.fetch(
"SELECT price, items_in_stock, timestamp FROM offers WHERE id = $1 AND product_id = $2;",
offer_id,
product_id,
)
if not offer_history: # The semantics are that no historical records -> no offer.
raise HTTPException(status_code=404, detail="Offer not found")
# Chronological sort; earliest first.
sorted_history = sorted(offer_history, key=itemgetter("timestamp"))
price_history = [
OfferPricepoint(timestamp=record["timestamp"], price=record["price"])
for record in sorted_history
]
return OfferHistory(
price_trend=calculate_price_trend(price_history),
price_history=price_history,
)
| 34.543568 | 98 | 0.695375 |
a67d015ecb95c897776caf364afa555a4b61873f | 2,585 | py | Python | Scheduler/Datasets/RRG/random_regular_graph.py | dsdn/tssdn | bc4f320d7d483d3852597c44676060aa70e8b2b2 | [
"Apache-2.0"
] | 4 | 2021-11-13T11:38:02.000Z | 2022-01-21T19:05:26.000Z | Scheduler/Datasets/RRG/random_regular_graph.py | dsdn/tssdn | bc4f320d7d483d3852597c44676060aa70e8b2b2 | [
"Apache-2.0"
] | null | null | null | Scheduler/Datasets/RRG/random_regular_graph.py | dsdn/tssdn | bc4f320d7d483d3852597c44676060aa70e8b2b2 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
'''
Generates a random regular graph and creates a random set of flows with period
Output -
links.dat -> (n1, n2)
flows.dat -> (src, dst, p)
readme
Author - Naresh Nayak
Date - 15.02.2016
'''
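# Illustrative output (a sketch, not produced by this exact configuration):
#   links.dat lines look like "(s0 s1)" or "(h3 s2)"  -- one directed edge per line
#   flows.dat lines look like "(h1 h4 h7 1)"          -- source host, destination host(s), period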
import networkx as nx
import random, os, sys
# Input data required for topology generation
nSwitches = 12
nHosts = 50
nDegree = 10
nFlows = 100
maxPeriod = 1
maxDst = 1
nSeed = 0
coreFlowsOnly = 1
# A. Build a random graphs
randGraph = nx.random_regular_graph(nDegree, nSwitches, nSeed).to_directed()
random.seed(nSeed)
# B. Construct required input data for ILP
# B.1 -> Random topology corresponding to the random graph
hosts = []
switches = []
flows = []
flowHosts = []
hostToSwitch = {}
topo = nx.DiGraph()
for h in range(0, nHosts):
hosts.append("h"+str(h))
for s in range(0, nSwitches):
switches.append("s"+str(s))
for (u,v) in randGraph.edges():
topo.add_edge("s"+str(u), "s"+str(v))
for h in range(0, len(hosts)):
sw = random.randint(0, len(switches)-1)
hostToSwitch["h"+str(h)] = sw
topo.add_edge("h"+str(h), "s"+str(sw))
topo.add_edge("s"+str(sw), "h"+str(h))
links = topo.edges()
while len(flows) < nFlows:
r = random.randint(0,len(hosts)-1)
hostSrc = "h"+str(r)
numDst = random.randint(1, maxDst)
hostDst = []
while len(hostDst) < numDst:
r = random.randint(0,len(hosts)-1)
dst = "h"+str(r)
if (hostSrc != dst) and dst not in hostDst:
hostDst.append(dst)
edgeDstSwitches = [hostToSwitch[h] for h in hostDst]
edgeSrcSwitches = [hostToSwitch[h] for h in [hostSrc]]
if len(set(edgeDstSwitches + edgeSrcSwitches)) == 1 and coreFlowsOnly == 1:
proceed = False
else:
proceed = True
if proceed:
if (hostSrc, set(hostDst)) not in flowHosts:
flow = (hostSrc, hostDst, random.randint(1, maxPeriod))
flows.append(flow)
flowHosts.append((hostSrc, set(hostDst)))
# Create directory name
strDir = "RRG-s"+str(nSwitches)+"-h"+str(nHosts)+"-d"+str(nDegree)+"-f"+str(nFlows)+"-mp"+str(maxPeriod)+"-md"+str(maxDst)+"-seed"+str(nSeed)+"-c"+str(coreFlowsOnly)
try:
os.mkdir(strDir)
except OSError:
pass
# Write links.dat
fLinks = open(strDir+"/links.dat", "w")
for (u,v) in links:
fLinks.write("(" + u + " " + v + ")\n")
fLinks.close()
# Write flows.dat
fFlows = open(strDir+"/flows.dat", "w")
for (src, dst, p) in flows:
strDst = ""
for h in dst:
strDst += h + " "
fFlows.write("(" + src + " " + strDst[:-1] + " " + str(p) +")\n")
fFlows.close()
| 25.343137 | 165 | 0.616248 |
b9505f8639731379156c57ebbd36512c8b30ac02 | 2,154 | py | Python | iota/transaction/utils.py | JakeSCahill/iota.py | 1e691ba02ccf7687f06d1a39a8ae4babdfaacc26 | [
"MIT"
] | null | null | null | iota/transaction/utils.py | JakeSCahill/iota.py | 1e691ba02ccf7687f06d1a39a8ae4babdfaacc26 | [
"MIT"
] | null | null | null | iota/transaction/utils.py | JakeSCahill/iota.py | 1e691ba02ccf7687f06d1a39a8ae4babdfaacc26 | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from calendar import timegm as unix_timestamp
from datetime import datetime
from typing import Text
from iota import STANDARD_UNITS
from iota.exceptions import with_context
__all__ = [
'convert_value_to_standard_unit',
'get_current_timestamp',
]
def convert_value_to_standard_unit(value, symbol='i'):
# type: (Text, Text) -> float
"""
Converts between any two standard units of iota.
:param value:
Value (affixed) to convert. For example: '1.618 Mi'.
:param symbol:
Unit symbol of iota to convert to. For example: 'Gi'.
:return:
Float as units of given symbol to convert to.
"""
try:
# Get input value
value_tuple = value.split()
amount = float(value_tuple[0])
except (ValueError, IndexError, AttributeError):
raise with_context(
ValueError('Value to convert is not valid.'),
context={
'value': value,
},
)
try:
# Set unit symbols and find factor/multiplier.
unit_symbol_from = value_tuple[1]
unit_factor_from = float(STANDARD_UNITS[unit_symbol_from])
unit_factor_to = float(STANDARD_UNITS[symbol])
except (KeyError, IndexError):
# Invalid symbol or no factor
raise with_context(
ValueError('Invalid IOTA unit.'),
context={
'value': value,
'symbol': symbol,
},
)
return amount * (unit_factor_from / unit_factor_to)
def get_current_timestamp():
# type: () -> int
"""
Returns the current timestamp, used to set ``timestamp`` for new
:py:class:`ProposedTransaction` objects.
Split out into a separate function so that it can be mocked during
unit tests.
"""
# Python 3.3 introduced a :py:meth:`datetime.timestamp` method, but
# for compatibility with Python 2, we have to do it the
# old-fashioned way.
# http://stackoverflow.com/q/2775864/
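    # (On Python 3 only, an equivalent would be something like
    # ``int(datetime.now(timezone.utc).timestamp())`` -- noted here purely as
    # an illustration, not used by this module.)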
return unix_timestamp(datetime.utcnow().timetuple())
| 27.615385 | 71 | 0.637883 |
d1a2088589a0fd125f1f8f6170be86959b87703e | 2,863 | py | Python | instana/instrumentation/cassandra_inst.py | instana/python-sensor | 1005ad0012e126d504b3670bbca7fe90ecc949bd | [
"MIT"
] | 61 | 2017-09-27T02:50:17.000Z | 2022-03-22T12:13:37.000Z | instana/instrumentation/cassandra_inst.py | instana/python-sensor | 1005ad0012e126d504b3670bbca7fe90ecc949bd | [
"MIT"
] | 82 | 2017-07-11T13:47:33.000Z | 2022-03-22T10:10:38.000Z | instana/instrumentation/cassandra_inst.py | instana/python-sensor | 1005ad0012e126d504b3670bbca7fe90ecc949bd | [
"MIT"
] | 27 | 2017-09-11T16:22:32.000Z | 2022-03-11T17:21:49.000Z | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
"""
cassandra instrumentation
https://docs.datastax.com/en/developer/python-driver/3.20/
https://github.com/datastax/python-driver
"""
from __future__ import absolute_import
import wrapt
from ..log import logger
from ..util.traceutils import get_active_tracer
try:
import cassandra
consistency_levels = dict({0: "ANY",
1: "ONE",
2: "TWO",
3: "THREE",
4: "QUORUM",
5: "ALL",
6: "LOCAL_QUORUM",
7: "EACH_QUORUM",
8: "SERIAL",
9: "LOCAL_SERIAL",
10: "LOCAL_ONE"})
def collect_response(span, fn):
tried_hosts = list()
for host in fn.attempted_hosts:
tried_hosts.append("%s:%d" % (host.endpoint.address, host.endpoint.port))
span.set_tag("cassandra.triedHosts", tried_hosts)
span.set_tag("cassandra.coordHost", fn.coordinator_host)
cl = fn.query.consistency_level
if cl and cl in consistency_levels:
span.set_tag("cassandra.achievedConsistency", consistency_levels[cl])
def cb_request_finish(results, span, fn):
collect_response(span, fn)
span.finish()
def cb_request_error(results, span, fn):
collect_response(span, fn)
span.mark_as_errored({"cassandra.error": results.message})
span.finish()
def request_init_with_instana(fn):
active_tracer = get_active_tracer()
if active_tracer is not None:
parent_span = active_tracer.active_span
ctags = dict()
if isinstance(fn.query, cassandra.query.SimpleStatement):
ctags["cassandra.query"] = fn.query.query_string
elif isinstance(fn.query, cassandra.query.BoundStatement):
ctags["cassandra.query"] = fn.query.prepared_statement.query_string
ctags["cassandra.keyspace"] = fn.session.keyspace
ctags["cassandra.cluster"] = fn.session.cluster.metadata.cluster_name
span = active_tracer.start_span(
operation_name="cassandra",
child_of=parent_span,
tags=ctags)
fn.add_callback(cb_request_finish, span, fn)
fn.add_errback(cb_request_error, span, fn)
@wrapt.patch_function_wrapper('cassandra.cluster', 'Session.__init__')
def init_with_instana(wrapped, instance, args, kwargs):
session = wrapped(*args, **kwargs)
instance.add_request_init_listener(request_init_with_instana)
return session
logger.debug("Instrumenting cassandra")
except ImportError:
pass
| 32.534091 | 85 | 0.592036 |
dc37bc723564c177bd363aa2030093cfc6d12851 | 5,387 | py | Python | saleor/cart/__init__.py | X10project/rob_photography | baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5 | [
"BSD-3-Clause"
] | 3 | 2015-12-30T19:06:27.000Z | 2021-10-06T04:23:36.000Z | saleor/cart/__init__.py | X10project/rob_photography | baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5 | [
"BSD-3-Clause"
] | null | null | null | saleor/cart/__init__.py | X10project/rob_photography | baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5 | [
"BSD-3-Clause"
] | 4 | 2019-09-17T11:39:41.000Z | 2022-01-24T10:22:50.000Z | from __future__ import unicode_literals
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext
from prices import Price
from satchless import cart
from satchless.item import ItemList, partition
CART_SESSION_KEY = 'cart'
class ProductGroup(ItemList):
def is_shipping_required(self):
return any(p.is_shipping_required() for p in self)
class CartLine(cart.CartLine):
def __init__(self, product, quantity, data=None, discounts=None):
super(CartLine, self).__init__(product, quantity, data=data)
self.discounts = discounts
def get_price_per_item(self, **kwargs):
kwargs.setdefault('discounts', self.discounts)
return super(CartLine, self).get_price_per_item(**kwargs)
def is_shipping_required(self):
return self.product.is_shipping_required()
@python_2_unicode_compatible
class Cart(cart.Cart):
"""Full-fledged cart implementation
This implementation depends on the database to implement its functionality.
A lightweight SessionCart is kept internally for easy storage.
"""
timestamp = None
billing_address = None
def __init__(self, session_cart, discounts=None):
super(Cart, self).__init__()
self.session_cart = session_cart
self.discounts = discounts
def __str__(self):
return pgettext(
'Shopping cart',
'Your cart (%(cart_count)s)') % {'cart_count': self.count()}
@classmethod
def for_session_cart(cls, session_cart, discounts=None):
from ..product.models import Product
cart = Cart(session_cart, discounts=discounts)
product_ids = [item.data['product_id'] for item in session_cart]
products = Product.objects.filter(id__in=product_ids)
products = products.select_subclasses()
product_map = dict((p.id, p) for p in products)
for item in session_cart:
try:
product = product_map[item.data['product_id']]
except KeyError:
# TODO: Provide error message
continue
else:
variant = product.variants.get_subclass(
pk=item.data['variant_id'])
quantity = item.quantity
cart.add(variant, quantity=quantity, check_quantity=False,
skip_session_cart=True)
return cart
def get_data_for_product(self, variant):
variant_price = variant.get_price_per_item(discounts=self.discounts)
variant_data = {
'product_slug': variant.product.get_slug(),
'product_id': variant.product.pk,
'variant_id': variant.pk,
'unit_price_gross': str(variant_price.gross),
'unit_price_net': str(variant_price.net)}
return variant_data
def add(self, product, quantity=1, data=None, replace=False,
check_quantity=True, skip_session_cart=False):
super(Cart, self).add(product, quantity, data, replace, check_quantity)
data = self.get_data_for_product(product)
if not skip_session_cart:
display = product.display_product()
self.session_cart.add(display, quantity, data, replace=replace)
def clear(self):
super(Cart, self).clear()
self.session_cart.clear()
def create_line(self, product, quantity, data):
return CartLine(product, quantity, data=data, discounts=self.discounts)
def is_shipping_required(self):
return any(line.is_shipping_required() for line in self)
def partition(self):
return partition(
self,
lambda p: 'physical' if p.is_shipping_required() else 'digital',
ProductGroup)
class SessionCartLine(cart.CartLine):
def get_price_per_item(self, **kwargs):
gross = self.data['unit_price_gross']
net = self.data['unit_price_net']
return Price(net=net, gross=gross, currency=settings.DEFAULT_CURRENCY)
def for_storage(self):
return {
'product': self.product,
'quantity': self.quantity,
'data': self.data}
@classmethod
def from_storage(cls, data_dict):
product = data_dict['product']
quantity = data_dict['quantity']
data = data_dict['data']
instance = SessionCartLine(product, quantity, data)
return instance
@python_2_unicode_compatible
class SessionCart(cart.Cart):
"""Simplified cart representation that gets serialized into the user's session.
It contains just enough information to display cart contents on every page
without executing any database queries. At times it may be inaccurate if
prices or item availability change but it should not matter as checkout
depends on the full representation of the cart.
"""
def __str__(self):
return 'SessionCart'
@classmethod
def from_storage(cls, cart_data):
cart = SessionCart()
for line_data in cart_data['items']:
cart._state.append(SessionCartLine.from_storage(line_data))
return cart
def for_storage(self):
cart_data = {
'items': [i.for_storage() for i in self]}
return cart_data
def create_line(self, product, quantity, data):
return SessionCartLine(product, quantity, data)
| 34.532051 | 83 | 0.664563 |
5ad9219be7f163da39820bf0abead491de259278 | 5,025 | py | Python | util/checkpoint-tester.py | He-Liu-ooo/Computer-Architecture-THUEE-2022-spring- | 9d36aaacbc7eea357608524113bec97bae2ea229 | [
"BSD-3-Clause"
] | 8 | 2021-12-17T08:07:14.000Z | 2022-03-23T11:49:06.000Z | util/checkpoint-tester.py | He-Liu-ooo/Computer-Architecture-THUEE-2022-spring- | 9d36aaacbc7eea357608524113bec97bae2ea229 | [
"BSD-3-Clause"
] | 3 | 2022-01-09T07:50:03.000Z | 2022-02-05T14:46:57.000Z | util/checkpoint-tester.py | He-Liu-ooo/Computer-Architecture-THUEE-2022-spring- | 9d36aaacbc7eea357608524113bec97bae2ea229 | [
"BSD-3-Clause"
] | 5 | 2021-12-27T08:39:13.000Z | 2022-03-08T10:21:37.000Z | #! /usr/bin/env python2.7
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Steve Reinhardt
#
# Basic test script for checkpointing.
#
# Given an M5 command and an interval (in ticks), this script will:
# 1. Run the command, dumping periodic checkpoints at the given interval.
# 2. Rerun the command for each pair of adjacent checkpoints:
# a. Restore from checkpoint N
# b. Run until the timestamp of checkpoint N+1
# c. Dump a checkpoint and end the simulation
# d. Diff the new checkpoint with the original checkpoint N+1
#
# Note that '--' must be used to separate the script options from the
# M5 command line.
#
# Caveats:
#
# - This script relies on the checkpoint options implemented in
# configs/common/Simulation.py, so it works with commands based on
# the se.py and fs.py scripts in configs/example, but does not work
# directly with the existing regression tests.
# - Interleaving simulator and program output can cause discrepancies
# in the file position checkpoint information since different runs
#   have different amounts of simulator output.
# - Probably lots more issues we don't even know about yet.
#
# Examples:
#
# util/checkpoint-tester.py -i 400000 -- build/<ISA>/m5.opt \
# configs/example/se.py -c tests/test-progs/hello/bin/<isa>/tru64/hello \
# --output=progout --errout=progerr
#
# util/checkpoint-tester.py -i 200000000000 -- build/<ISA>/m5.opt \
# configs/example/fs.py --script tests/halt.sh
#
import os, sys, re
import subprocess
import optparse
parser = optparse.OptionParser()
parser.add_option('-i', '--interval', type='int')
parser.add_option('-d', '--directory', default='checkpoint-test')
(options, args) = parser.parse_args()
interval = options.interval
if os.path.exists(options.directory):
print 'Error: test directory', options.directory, 'exists'
print ' Tester needs to create directory from scratch'
sys.exit(1)
top_dir = options.directory
os.mkdir(top_dir)
cmd_echo = open(os.path.join(top_dir, 'command'), 'w')
print >>cmd_echo, ' '.join(sys.argv)
cmd_echo.close()
m5_binary = args[0]
options = args[1:]
initial_args = ['--take-checkpoints', '%d,%d' % (interval, interval)]
cptdir = os.path.join(top_dir, 'm5out')
print '===> Running initial simulation.'
subprocess.call([m5_binary] + ['-red', cptdir] + options + initial_args)
dirs = os.listdir(cptdir)
expr = re.compile('cpt\.([0-9]*)')
cpts = []
for dir in dirs:
match = expr.match(dir)
if match:
cpts.append(int(match.group(1)))
cpts.sort()
# We test by loading checkpoint N, simulating to (and dumping at)
# checkpoint N+1, then comparing the resulting checkpoint with the
# original checkpoint N+1. Thus the number of tests we can run is one
# less than the number of checkpoints.
for i in range(1, len(cpts)):
print '===> Running test %d of %d.' % (i, len(cpts)-1)
mydir = os.path.join(top_dir, 'test.%d' % i)
subprocess.call([m5_binary] + ['-red', mydir] + options + initial_args +
['--max-checkpoints' , '1', '--checkpoint-dir', cptdir,
'--checkpoint-restore', str(i)])
cpt_name = 'cpt.%d' % cpts[i]
diff_name = os.path.join(mydir, 'diffout')
diffout = open(diff_name, 'w')
subprocess.call(['diff', '-ru', '-I', '^##.*',
'%s/%s' % (cptdir, cpt_name),
'%s/%s' % (mydir, cpt_name)], stdout=diffout)
diffout.close()
# print out the diff
diffout = open(diff_name)
print diffout.read(),
diffout.close()
| 36.948529 | 78 | 0.70806 |
c674b3e16c825deb086a0ded923654386b19352c | 6,545 | py | Python | templates/zeroboot_reservation/zeroboot_reservation_test.py | threefoldtech/0-templates | 4106bb3d4d1de305557bf4748a7d77ffeb302abb | [
"Apache-2.0"
] | 1 | 2019-01-20T17:50:53.000Z | 2019-01-20T17:50:53.000Z | templates/zeroboot_reservation/zeroboot_reservation_test.py | threefoldtech/0-templates | 4106bb3d4d1de305557bf4748a7d77ffeb302abb | [
"Apache-2.0"
] | 192 | 2018-08-01T13:31:16.000Z | 2020-05-29T09:41:06.000Z | templates/zeroboot_reservation/zeroboot_reservation_test.py | threefoldtech/0-templates | 4106bb3d4d1de305557bf4748a7d77ffeb302abb | [
"Apache-2.0"
] | 1 | 2018-08-09T12:30:52.000Z | 2018-08-09T12:30:52.000Z | import os
import pytest
from unittest.mock import MagicMock
from jumpscale import j
from zerorobot.template.state import StateCheckError
from JumpscaleZrobot.test.utils import ZrobotBaseTest
from zeroboot_reservation import ZerobootReservation
class TestZerobootReservationTemplate(ZrobotBaseTest):
@classmethod
def setUpClass(cls):
super().preTest(os.path.dirname(__file__), ZerobootReservation)
cls._valid_data = {"zerobootPool": "pool1", "lkrnUrl": "some-url"}
def test_validation(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
reservation.validate()
def test_validation_invalid_data(self):
data = {
"zerobootPool": "pool1",
}
reservation = ZerobootReservation(name="test", data=data)
reservation.api = MagicMock()
# missing lkrnUrl
with pytest.raises(ValueError, message="Should fail due to missing lkrnUrl") as exinfo:
reservation.validate()
if not "lkrnUrl" in str(exinfo):
pytest.fail("Validation failed but did not contain missing data 'lkrnUrl': %s" % exinfo)
# missing pool
reservation.data = {
"lkrnUrl": "some-url",
}
with pytest.raises(ValueError, message="Should fail due to missing zerobootPool") as exinfo:
reservation.validate()
if not "zerobootPool" in str(exinfo):
pytest.fail("Validation failed but did not contain missing data 'zerobootPool': %s" % exinfo)
def test_validation_zeroboot_host(self):
# provide zeroboot host before installing
data = {
"lkrnUrl": "some-url",
"zerobootPool": "pool1",
"hostInstance": "host-foo",
}
reservation = ZerobootReservation(name="test", data=data)
reservation.api = MagicMock()
with pytest.raises(ValueError, message="Should fail due to provided hostInstance before installing") as exinfo:
reservation.validate()
if not "hostInstance" in str(exinfo):
pytest.fail("Expected an error but received error was not for 'hostInstance': %s" % exinfo)
# no zeroboot host after installing
reservation.state.set("actions", "install", "ok")
reservation.data = {
"lkrnUrl": "some-url",
"zerobootPool": "pool1",
}
        with pytest.raises(ValueError, message="Should fail due to missing hostInstance after installing") as exinfo:
reservation.validate()
if not "hostInstance" in str(exinfo):
pytest.fail("Expected an error but received error was not for 'hostInstance': %s" % exinfo)
def test_install(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
reserved_host = "host1"
mock_pool1 = MagicMock()
mock_pool1.schedule_action().wait().result = reserved_host
reservation.api.services.get = MagicMock(return_value=mock_pool1)
reservation.install()
reservation.state.check("actions", "install", "ok")
def test_uninstall(self):
# install
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
reserved_host = "host1"
mock_pool1 = MagicMock()
mock_pool1.schedule_action().wait().result = reserved_host
reservation.api.services.get = MagicMock(return_value=mock_pool1)
reservation.install()
# should not fail as service was installed
reservation.state.check("actions", "install", "ok")
# uninstall
reservation.uninstall()
# check power off called
reservation.api.services.get().schedule_action.assert_called_with("power_off")
# check 'hostInstance' cleared
if reservation.data.get('hostInstance'):
pytest.fail("'hostInstance' should be cleared after uninstall")
# check action install state
with pytest.raises(StateCheckError, message="reservation service should now be uninstalled"):
reservation.state.check("actions", "install", "ok")
def test_power_on_installed(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
with pytest.raises(StateCheckError, message="power_on should failed as the service in not installed"):
reservation.power_on()
reservation.install()
reservation.power_on()
def test_power_off_installed(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
with pytest.raises(StateCheckError, message="power_off should failed as the service in not installed"):
reservation.power_off()
reservation.install()
reservation.power_off()
def test_power_cycle_installed(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
with pytest.raises(StateCheckError, message="power_cycle should failed as the service in not installed"):
reservation.power_cycle()
reservation.install()
reservation.power_cycle()
def test_power_status_installed(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
with pytest.raises(StateCheckError, message="power_status should failed as the service in not installed"):
reservation.power_status()
reservation.install()
reservation.power_status()
def test_monitor_installed(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
with pytest.raises(StateCheckError, message="monitor should failed as the service in not installed"):
reservation.monitor()
reservation.install()
reservation.monitor()
def test_configure_ipxe_boot_installed(self):
reservation = ZerobootReservation(name="test", data=self._valid_data)
reservation.api = MagicMock()
with pytest.raises(StateCheckError, message="configure_ipxe_boot should failed as the service in not installed"):
reservation.configure_ipxe_boot("some.boot.url")
reservation.install()
reservation.configure_ipxe_boot("some.boot.url")
| 36.977401 | 126 | 0.668449 |
76dd52f14b261da577c3aa33a93188b39ce2dd1f | 138 | py | Python | 04/04_P17.py | endowp/Python101 | 9c29387f4ed53d10579613ecf5153b71abf7ccd7 | [
"MIT"
] | null | null | null | 04/04_P17.py | endowp/Python101 | 9c29387f4ed53d10579613ecf5153b71abf7ccd7 | [
"MIT"
] | null | null | null | 04/04_P17.py | endowp/Python101 | 9c29387f4ed53d10579613ecf5153b71abf7ccd7 | [
"MIT"
] | null | null | null | s=input()
temp=""
for i in s:
for j in s:
count=0
if j==i and count==1:
print(j, end="")
count+=1
| 15.333333 | 29 | 0.42029 |
61989ac0407ecccb4b8d3dc24484214619f89e60 | 1,719 | py | Python | profiles_api/migrations/0001_initial.py | mamalfender/profile-rest-api | b3b8366d89e8a9d2c9664395c2fd7617bad6ae1f | [
"MIT"
] | null | null | null | profiles_api/migrations/0001_initial.py | mamalfender/profile-rest-api | b3b8366d89e8a9d2c9664395c2fd7617bad6ae1f | [
"MIT"
] | null | null | null | profiles_api/migrations/0001_initial.py | mamalfender/profile-rest-api | b3b8366d89e8a9d2c9664395c2fd7617bad6ae1f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-06-03 14:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.558824 | 266 | 0.64107 |
3ec62c76f37ac2f42b5f711824419970887e5b66 | 1,020 | py | Python | test_package/conanfile.py | Konijnendijk/conan-liblinear | 54387019ac85c64cfc1482dc52383faaf4123e15 | [
"MIT"
] | null | null | null | test_package/conanfile.py | Konijnendijk/conan-liblinear | 54387019ac85c64cfc1482dc52383faaf4123e15 | [
"MIT"
] | null | null | null | test_package/conanfile.py | Konijnendijk/conan-liblinear | 54387019ac85c64cfc1482dc52383faaf4123e15 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from conans import ConanFile, CMake, tools, RunEnvironment
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def imports(self):
self.copy("*.a", "", "lib")
self.copy("*.so*", "", "lib")
self.copy("*.dll*", "", "bin")
def test(self):
with tools.environment_append(RunEnvironment(self).vars):
bin_path = os.path.join("bin", "test_package")
if self.settings.os == "Windows":
self.run(bin_path)
elif self.settings.os == "Macos":
self.run("DYLD_LIBRARY_PATH=%s %s" % (os.environ.get('DYLD_LIBRARY_PATH', ''), bin_path))
else:
self.run("LD_LIBRARY_PATH=%s %s" % (os.environ.get('LD_LIBRARY_PATH', ''), bin_path))
| 31.875 | 105 | 0.573529 |
fef6752cbbbdde30b4197c6926c31b68e1747b12 | 2,872 | py | Python | corpus-utils/conllu2json.py | recognai/spacy-dev-resources | 9cc3989cb516dd94cfda0d99367ab3f4b92b8170 | [
"MIT"
] | 1 | 2017-04-20T13:00:32.000Z | 2017-04-20T13:00:32.000Z | corpus-utils/conllu2json.py | recognai/spacy-dev-resources | 9cc3989cb516dd94cfda0d99367ab3f4b92b8170 | [
"MIT"
] | null | null | null | corpus-utils/conllu2json.py | recognai/spacy-dev-resources | 9cc3989cb516dd94cfda0d99367ab3f4b92b8170 | [
"MIT"
] | 4 | 2018-03-20T01:39:32.000Z | 2021-06-24T15:55:55.000Z | """Convert conllu files into JSON format for use with train cli.
use_morphology parameter enables appending morphology to tags, which is
useful for languages such as Spanish, where UD tags are not so rich.
"""
from spacy.gold import read_json_file, merge_sents
import io, json, plac
def read_conllx(loc, use_morphology=False, n=0):
with io.open(loc, 'r', encoding='utf8') as file_:
text = file_.read()
i = 0
for sent in text.strip().split('\n\n'):
lines = sent.strip().split('\n')
if lines:
while lines[0].startswith('#'):
lines.pop(0)
tokens = []
for line in lines:
id_, word, lemma, pos, tag, morph, head, dep, _1, \
_2 = line.split('\t')
if '-' in id_ or '.' in id_:
continue
try:
id_ = int(id_) - 1
head = (int(head) - 1) if head != '0' else id_
dep = 'ROOT' if dep == 'root' else dep
tag = pos+'__'+morph if use_morphology else pos
tokens.append((id_, word, tag, head, dep, 'O'))
except:
print(line)
raise
tuples = [list(t) for t in zip(*tokens)]
yield (None, [[tuples, []]])
i += 1
if n >= 1 and i >= n:
break
def generate_sentence(sent):
(id_, word, tag, head, dep, _ ) = sent
sentence = {}
tokens = []
for i, id in enumerate(id_):
token = {}
token["orth"] = word[id]
token["tag"] = tag[id]
token["head"] = head[id] - i
token["dep"] = dep[id]
tokens.append(token)
sentence["tokens"] = tokens
return sentence
def create_doc(sentences,id):
doc = {}
paragraph = {}
doc["id"] = id
doc["paragraphs"] = []
paragraph["sentences"] = sentences
doc["paragraphs"].append(paragraph)
return doc
def main(loc, use_morphology=True, n_sentences_per_doc=10):
docs = []
sentences = []
print('Reading input file {}'.format(loc))
conll_tuples = read_conllx(loc, use_morphology=use_morphology)
for i, (raw_text, tokens) in enumerate(conll_tuples):
sentence, brackets = tokens[0]
sentences.append(generate_sentence(sentence))
        # Real-sized documents could be extracted using the comments in the CoNLL-U document
if(len(sentences) % n_sentences_per_doc == 0):
doc = create_doc(sentences, i)
docs.append(doc)
sentences = []
print('Created {} documents'.format(len(docs)))
json_out = loc.replace(".conllu", ".json")
print('Writing output file {}'.format(json_out))
with io.open(json_out, 'w') as outfile:
json.dump(docs, outfile)
if __name__ == '__main__':
plac.call(main)
| 33.011494 | 91 | 0.547006 |
b615ffaf9b7da0a85e1b8848b492d8de59db2580 | 6,130 | py | Python | cli/cook/subcommands/tail.py | dposada/Cook | 0d7def46e50dad5f172970a80faac9cd9b17f77b | [
"Apache-2.0"
] | null | null | null | cli/cook/subcommands/tail.py | dposada/Cook | 0d7def46e50dad5f172970a80faac9cd9b17f77b | [
"Apache-2.0"
] | null | null | null | cli/cook/subcommands/tail.py | dposada/Cook | 0d7def46e50dad5f172970a80faac9cd9b17f77b | [
"Apache-2.0"
] | null | null | null | import time
from functools import partial
from cook.mesos import read_file
from cook.querying import query_unique_and_run, parse_entity_refs
from cook.util import check_positive, guard_no_cluster
CHUNK_SIZE = 4096
LINE_DELIMITER = '\n'
DEFAULT_NUM_LINES = 10
DEFAULT_FOLLOW_SLEEP_SECS = 1.0
DEFAULT_PATH = 'stdout'
# For everything we print in tail, we want to forcibly flush
# the stream, and we don't want to end with a newline
__print = partial(print, flush=True, end='')
def print_lines(lines):
"""Prints the given list of lines, delimited, and with no trailing newline"""
__print(LINE_DELIMITER.join(lines))
def check_enough_lines_read(line_buffer, num_lines_to_print):
"""If enough lines have been read to satisfy the user's request, prints those lines"""
num_lines_buffered = len(line_buffer)
if num_lines_buffered > 0:
# If the last line is empty, don't count it as a line that we care about
last_line_empty = line_buffer[-1] == ''
num_lines_printable = num_lines_buffered - (1 if last_line_empty else 0)
if num_lines_printable >= num_lines_to_print:
if last_line_empty:
num_lines_to_print = num_lines_to_print + 1
print_lines(line_buffer[-num_lines_to_print:])
return True
return False
def check_start_of_file(offset, partial_line_buffer, line_buffer):
"""If we have reached the start of the file, prints what we have read"""
if offset == 0:
__print(partial_line_buffer)
num_lines_buffered = len(line_buffer)
if num_lines_buffered > 0:
__print('\n')
print_lines(line_buffer)
return True
return False
def tail_backwards(file_size, read_fn, num_lines_to_print):
"""
Reads chunks backwards from the end of the file and splits them into
lines as it goes. If it finds that enough lines have been read to satisfy
the user's request, or if it reaches the beginning of the file, it stops
"""
offset = max(file_size - CHUNK_SIZE, 0)
length = file_size - offset
partial_line_buffer = ''
line_buffer = []
while True:
# Read the data at offset and length
resp = read_fn(offset=offset, length=length)
data = resp['data']
# Add to our buffer of text we've read from the agent
partial_line_buffer = data + partial_line_buffer
# Attempt to split into lines
lines = partial_line_buffer.split(LINE_DELIMITER)
if len(lines) > 1:
index_first_delimiter = len(lines[0])
partial_line_buffer = partial_line_buffer[:index_first_delimiter]
line_buffer = lines[1:] + line_buffer
# Check if we've read enough lines
if check_enough_lines_read(line_buffer, num_lines_to_print):
break
# Check if we've reached the start of the file
if check_start_of_file(offset, partial_line_buffer, line_buffer):
break
# Update our offset and length
new_offset = max(offset - CHUNK_SIZE, 0)
length = offset - new_offset
offset = new_offset
def tail_follow(file_size, read_fn, follow_sleep_seconds):
"""Follows the file as it grows, printing new contents"""
offset = file_size
length = CHUNK_SIZE
while True:
resp = read_fn(offset=offset, length=length)
data = resp['data']
num_chars_read = len(data)
if num_chars_read > 0:
__print(data)
offset = offset + num_chars_read
time.sleep(follow_sleep_seconds)
def tail_for_instance(instance, sandbox_dir, path, num_lines_to_print, follow, follow_sleep_seconds):
"""
Tails the contents of the Mesos sandbox path for the given instance. If follow is truthy, it will
try and read more data from the file until the user terminates. This assumes files will not shrink.
"""
read = partial(read_file, instance=instance, sandbox_dir=sandbox_dir, path=path)
file_size = read()['offset']
tail_backwards(file_size, read, num_lines_to_print)
if follow:
tail_follow(file_size, read, follow_sleep_seconds)
def tail(clusters, args, _):
"""Tails the contents of the corresponding Mesos sandbox path by job or instance uuid."""
guard_no_cluster(clusters)
entity_refs, clusters_of_interest = parse_entity_refs(clusters, args.get('uuid'))
path = args.get('path')
lines = args.get('lines')
follow = args.get('follow')
sleep_interval = args.get('sleep-interval')
wait = args.get('wait')
if len(entity_refs) > 1:
# argparse should prevent this, but we'll be defensive anyway
raise Exception(f'You can only provide a single uuid.')
command_fn = partial(tail_for_instance, path=path, num_lines_to_print=lines,
follow=follow, follow_sleep_seconds=sleep_interval)
query_unique_and_run(clusters_of_interest, entity_refs[0], command_fn, wait)
def register(add_parser, add_defaults):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('tail', help='output last part of files by job or instance uuid')
parser.add_argument('--lines', '-n', help=f'output the last NUM lines (default {DEFAULT_NUM_LINES})',
metavar='NUM', type=check_positive)
parser.add_argument('--follow', '-f', help='output appended data as the file grows', action='store_true')
parser.add_argument('--sleep-interval', '-s',
help=f'with -f, sleep for N seconds (default {DEFAULT_FOLLOW_SLEEP_SECS}) between iterations',
metavar='N', type=float)
parser.add_argument('--wait', '-w',
help='wait indefinitely for the job to be running and for the file to become available',
action='store_true')
parser.add_argument('uuid', nargs=1)
parser.add_argument('path', nargs='?')
add_defaults('tail', {'lines': DEFAULT_NUM_LINES,
'sleep-interval': DEFAULT_FOLLOW_SLEEP_SECS,
'path': DEFAULT_PATH})
return tail
| 38.797468 | 118 | 0.674225 |
00a962de1b09f6198c0d6137e0de0adbc2725c02 | 6,959 | py | Python | ipt/ipt_threshold_otsu_overthinked.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | 1 | 2020-06-30T06:53:36.000Z | 2020-06-30T06:53:36.000Z | ipt/ipt_threshold_otsu_overthinked.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | ipt/ipt_threshold_otsu_overthinked.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import logging
logger = logging.getLogger(__name__)
from ipso_phen.ipapi.base.ip_common import (
create_channel_generator,
get_hr_channel_name,
CHANNELS_FLAT,
)
from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.base.ip_common import ToolFamily
class IptOtsuOverthinked(IptBase):
def build_params(self):
self.add_combobox(
name="merge_method",
desc="Merge method:",
default_value="l_and",
values=dict(l_and="Logical AND", l_or="Logical OR"),
hint="Selected merge method",
)
self.add_label(name="lbl_channel", desc="Channels:")
choices_dict = dict(disabled="disabled", active="active", inverted="inverted")
for color_space, channel, channel_name in create_channel_generator(
("h", "s", "l", "a", "b", "rd", "gr", "bl")
):
self.add_combobox(
name=f"{channel}",
desc=f"Channel {channel_name} behavior:",
default_value="active",
values=choices_dict,
hint=f"Select channel {get_hr_channel_name(channel)} behavior",
)
self.add_checkbox(name="normalize", desc="Normalize channel", default_value=0)
self.add_separator(name="sep1")
self.add_morphology_operator()
self.add_separator(name="sep2")
self.add_combobox(
name="build_mosaic",
desc="Build mosaic",
default_value="no",
values=dict(
no="None",
channels="Channels and result in the middle",
sbs="Source and result side by side",
),
hint="Choose mosaic type to display",
)
def process_wrapper(self, **kwargs):
"""
Otsu overthinked:
        Based on Otsu's binarization, uses a custom set of channels.
Real time: True
Keyword Arguments (in parentheses, argument name):
* Merge method: (merge_method): Selected merge method
* Channel hue behavior: (h): Select channel hsv: hue behavior
* Channel saturation behavior: (s): Select channel hsv: saturation behavior
* Channel lightness behavior: (l): Select channel lab: lightness behavior
* Channel a_green-red behavior: (a): Select channel lab: a_green-red behavior
* Channel b_blue-yellow behavior: (b): Select channel lab: b_blue-yellow behavior
* Channel red behavior: (rd): Select channel rgb: red behavior
* Channel green behavior: (gr): Select channel rgb: green behavior
* Channel blue behavior: (bl): Select channel rgb: blue behavior
* Normalize channel (normalize):
* Morphology operator (morph_op):
* Kernel size (kernel_size):
* Kernel shape (kernel_shape):
* Iterations (proc_times):
* Build mosaic (build_mosaic): Choose mosaic type to display
"""
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return False
res = False
try:
normalize = self.get_value_of("normalize") == 1
build_mosaic = self.get_value_of("build_mosaic")
merge_method = self.get_value_of("merge_method")
masks = []
power_ = 0
wrapper.store_image(wrapper.current_image, "current_image")
mask = None
for p in self.gizmos:
if (p.name not in CHANNELS_FLAT) or (p.value == "disabled"):
continue
_, mask = cv2.threshold(
wrapper.get_channel(channel=p.name, normalize=normalize),
0,
255,
cv2.THRESH_BINARY + cv2.THRESH_OTSU,
)
if p.value == "inverted":
mask = 255 - mask
wrapper.store_image(mask, f"Otsu_{get_hr_channel_name(p.name)}")
power_ += 1
masks.append(mask)
if masks:
if merge_method == "l_and":
mask = wrapper.multi_and(masks)
elif merge_method == "l_or":
mask = wrapper.multi_or(masks)
else:
logger.error("Unknown merge method")
return
self.result = self.apply_morphology_from_params(mask)
wrapper.store_image(self.result, "otsu_overthinked")
else:
img = wrapper.current_image
self.result = None
if (build_mosaic == "channels") and mask is not None:
canvas = wrapper.build_mosaic(
shape=(mask.shape[0] * 3, mask.shape[1] * 3, 3),
image_names=np.array(
[
[f"OTSU_{get_hr_channel_name(c)}" for c in ["h", "s", "l"]],
[
f'OTSU_{get_hr_channel_name("a")}',
"otsu_overthinked",
f'OTSU_{get_hr_channel_name("b")}',
],
[
f"OTSU_{get_hr_channel_name(c)}"
for c in ["rd", "gr", "bl"]
],
]
),
)
wrapper.store_image(canvas, "mosaic")
elif build_mosaic == "sbs":
canvas = wrapper.build_mosaic(
image_names=np.array(
[
"source",
"otsu_overthinked",
]
)
)
wrapper.store_image(canvas, "mosaic")
res = True
except Exception as e:
            logger.error(f'Failed to process {self.name}: "{repr(e)}"')
res = False
else:
pass
finally:
return res
def apply_test_values_overrides(self, use_cases: tuple = ()):
if ToolFamily.THRESHOLD in use_cases:
self.set_value_of("merge_method", "l_or")
@property
def name(self):
return "Otsu overthinked"
@property
def real_time(self):
return True
@property
def result_name(self):
return "mask"
@property
def output_kind(self):
return "mask"
@property
def use_case(self):
return [ToolFamily.THRESHOLD]
@property
def description(self):
return "Based on Otsu's binarization, uses a costum set of channels."
| 36.820106 | 94 | 0.503233 |
6df4c86abd29b0823a5ca673cc3f4c210b7f8918 | 214 | py | Python | cyclerental/cyclerental/doctype/kinisi_cycle_rental/test_kinisi_cycle_rental.py | bala0321/cyclerental | f0ce73cbcbbbd511cca88bc08f0030871cf8b350 | [
"MIT"
] | null | null | null | cyclerental/cyclerental/doctype/kinisi_cycle_rental/test_kinisi_cycle_rental.py | bala0321/cyclerental | f0ce73cbcbbbd511cca88bc08f0030871cf8b350 | [
"MIT"
] | null | null | null | cyclerental/cyclerental/doctype/kinisi_cycle_rental/test_kinisi_cycle_rental.py | bala0321/cyclerental | f0ce73cbcbbbd511cca88bc08f0030871cf8b350 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Hari and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestKinisiCycleRental(unittest.TestCase):
pass
| 19.454545 | 47 | 0.766355 |
603b0e0d7b12e5a904d62a1b27eb4b3053463f5e | 552 | py | Python | cybox/objects/gui_object.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 40 | 2015-03-05T18:22:51.000Z | 2022-03-06T07:29:25.000Z | cybox/objects/gui_object.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 106 | 2015-01-12T18:52:20.000Z | 2021-04-25T22:57:52.000Z | cybox/objects/gui_object.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 30 | 2015-03-25T07:24:40.000Z | 2021-07-23T17:10:11.000Z | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import fields
import cybox.bindings.gui_object as gui_binding
from cybox.common import ObjectProperties, Integer
class GUI(ObjectProperties):
_binding = gui_binding
_binding_class = gui_binding.GUIObjectType
_namespace = "http://cybox.mitre.org/objects#GUIObject-2"
_XSI_NS = "GUIObj"
_XSI_TYPE = "GUIObjectType"
height = fields.TypedField("Height", Integer)
width = fields.TypedField("Width", Integer)
| 29.052632 | 65 | 0.751812 |
0e03ea68223d42c1af9ea2d9930d32c1af27b27f | 6,870 | py | Python | parsedatetime/pdt_locales/fr_FR.py | iAmPlus/parsedatetime | a648c90843a9e941ce2b6d65719fbf7ad4ff22cb | [
"Apache-2.0"
] | null | null | null | parsedatetime/pdt_locales/fr_FR.py | iAmPlus/parsedatetime | a648c90843a9e941ce2b6d65719fbf7ad4ff22cb | [
"Apache-2.0"
] | null | null | null | parsedatetime/pdt_locales/fr_FR.py | iAmPlus/parsedatetime | a648c90843a9e941ce2b6d65719fbf7ad4ff22cb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import * # noqa
# don't use an unicode string
localeID = 'fr_FR'
dateSep = ['\/']
timeSep = [':', 'h']
meridian = ['du matin', 'du soir']
usesMeridian = True
uses24 = True
WeekdayOffsets = {}
MonthOffsets = {}
# always lowercase any lookup values - helper code expects that
Weekdays = [
'lundi', 'mardi', 'mercredi', 'jeudi',
'vendredi', 'samedi', 'dimanche',
]
shortWeekdays = [
'lun', 'mar', 'mer', 'jeu', 'ven', 'sam', 'dim',
]
Months = [
'janvier', 'février|fevrier', 'mars', 'avril', 'mai', 'juin', 'juillet',
'août|aout', 'septembre', 'octobre', 'novembre', 'décembre|decembre',
]
# We do not list 'mar' as a short name for 'mars' as it conflicts with
# the 'mar' of 'mardi'
shortMonths = [
'jan', 'fév|fev', 'mars', 'avr', 'mai', 'jui',
'juil', 'aoû|aou', 'sep', 'oct', 'nov', 'déc|dec',
]
# use the same formats as ICU by default
dateFormats = {
'full': 'EEEE d MMMM yyyy',
'long': 'd MMMM yyyy',
'medium': 'd MMM yyyy',
'short': 'd/M/yy'
}
timeFormats = {
'full': 'h:mm:ss a z',
'long': 'h:mm:ss a z',
'medium': 'h:mm:ss a',
'short': 'h:mm a',
}
dp_order = ['d', 'm', 'y']
# Used to parse expressions like "in 5 hours"
numbers = collections.OrderedDict([
('cinquante', 50),
('soixante dix', 70),
('quatre vingt dix', 90),
('quatre vingts', 80),
('quatre vingt', 80),
('zéro', 0),
('zero', 0),
('un', 1),
('une', 1),
('deux', 2),
('trois', 3),
('quatre', 4),
('cinq', 5),
('six', 6),
('sept', 7),
('huit', 8),
('neuf', 9),
('dix', 10),
('onze', 11),
('douze', 12),
('treize', 13),
('quatorze', 14),
('quinze', 15),
('seize', 16),
('vingt', 20),
('trente', 30),
('quarante', 40),
('soixante', 60),
('cent', 100),
])
decimal_mark = ','
# this will be added to re_values later
units = {
'seconds': ['seconde', 'secondes', 'sec', 's'],
'minutes': ['minute', 'minutes', 'min', 'mn'],
'hours': ['heure', 'heures', 'h'],
'days': ['jours', 'jour', 'journée', 'journee', 'journées', 'journees', 'j'],
'weeks': ['semaine', 'semaines', 'sem'],
'months': ['mois', 'm'],
'years': ['année', 'l\'année', 'annee', 'l\'annee', 'an', 'années', 'annees', 'ans'],
}
# text constants to be used by later regular expressions
re_values = {
'specials': 'à|a|le|la|du|de',
'timeseparator': '(?:\:|h|\s*heures?\s*)',
'of': None, # "eg. 3rd of march"
'rangeseparator': '-|a|à',
'daysuffix': 'ième|ieme|ème|eme|ère|ere|nde',
'meridian': None,
'qunits': 'h|m|s|j|sem|a',
'now': ['maintenant', 'tout de suite', 'immédiatement', 'immediatement', 'à l\'instant', 'a l\'instant'],
'after': r'après|apres|il\sy\sa|plus\stard',
'from': r'à\spartir|a\spartir|à\spartir\sde|a\spartir\sde|à\spartir\sd\'|a\spartir\sd\'|de|d\'', # num unit from rel
'this': 'ce|cette',
'next': 'prochaine|prochain|prochains|prochaines',
'last': 'dernière|derniere|dernier|dernières|dernieres|derniers',
'in': 'dans', # "in 5 days"
'since': 'depuis', # since time, since date, since num unit
}
# Used to adjust the returned date before/after the source
Modifiers = {
'avant': -1,
'il y a': -1,
'plus tot': -1,
'plus tôt': -1,
'y a': -1,
'antérieur': -1,
'antérieurs': -1,
'anterieur': -1,
'anterieurs': -1,
'dernier': -1,
'derniers': -1,
'dernière': -1,
'dernières': -1,
'derniere': -1,
'dernieres': -1,
'précédent': -1,
'précedent': -1,
'precédent': -1,
'precedent': -1,
'précédents': -1,
'précedents': -1,
'precédents': -1,
'precedents': -1,
'fin de': 0,
'fin du': 0,
'fin de la': 0,
'fin des': 0,
'fin d\'': 0,
'ce': 0,
'ces': 0,
'cette': 0,
'cettes': 0,
'depuis': 1,
'dans': 1,
'à partir': 1,
'a partir': 1,
'après': 1,
'apres': 1,
'lendemain': 1,
'prochain': 1,
'prochains': 1,
'prochaine': 1,
'prochaines': 1,
'suivant': 1,
'suivante': 1,
'plus tard': 1
}
dayOffsets = {
'après-demain': 2,
'apres-demain': 2,
'après demain': 2,
'apres demain': 2,
'demain': 1,
'aujourd\'hui': 0,
'hier': -1,
'avant-hier': -2,
'avant hier': -2
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replaced - the current date/time will
# already have been populated by the method buildSources
re_sources = {
'après-midi': {'hr': 13, 'mn': 0, 'sec': 0},
'apres-midi': {'hr': 13, 'mn': 0, 'sec': 0},
'après midi': {'hr': 13, 'mn': 0, 'sec': 0},
'apres midi': {'hr': 13, 'mn': 0, 'sec': 0},
'aprèm': {'hr': 13, 'mn': 0, 'sec': 0},
'aprem': {'hr': 13, 'mn': 0, 'sec': 0},
'midi': {'hr': 12, 'mn': 0, 'sec': 0},
'déjeuner': {'hr': 12, 'mn': 0, 'sec': 0},
'dejeuner': {'hr': 12, 'mn': 0, 'sec': 0},
'matin': {'hr': 6, 'mn': 0, 'sec': 0},
'petit-déjeuner': {'hr': 8, 'mn': 0, 'sec': 0},
'petit-dejeuner': {'hr': 8, 'mn': 0, 'sec': 0},
'petit déjeuner': {'hr': 8, 'mn': 0, 'sec': 0},
'petit dejeuner': {'hr': 8, 'mn': 0, 'sec': 0},
'diner': {'hr': 19, 'mn': 0, 'sec': 0},
'dîner': {'hr': 19, 'mn': 0, 'sec': 0},
'soir': {'hr': 18, 'mn': 0, 'sec': 0},
'soirée': {'hr': 18, 'mn': 0, 'sec': 0},
'soiree': {'hr': 18, 'mn': 0, 'sec': 0},
'minuit': {'hr': 0, 'mn': 0, 'sec': 0},
'nuit': {'hr': 21, 'mn': 0, 'sec': 0},
}
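    # Illustrative note (not part of the original locale data): an entry such as 'midi'
    # above only replaces the hour/minute/second fields of the date already built by
    # buildSources, so parsing "midi" on 2017-09-22 would resolve to 2017-09-22 12:00:00.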
small = {
'zéro': 0,
'zero': 0,
'un': 1,
'une': 1,
'deux': 2,
'trois': 3,
'quatre': 4,
'cinq': 5,
'six': 6,
'sept': 7,
'huit': 8,
'neuf': 9,
'dix': 10,
'onze': 11,
'douze': 12,
'treize': 13,
'quatorze': 14,
'quinze': 15,
'seize': 16,
'dix-sept': 17,
'dix sept': 17,
'dix-huit': 18,
'dix huit': 18,
'dix-neuf': 19,
'dix neuf': 19,
'vingt': 20,
'vingt-et-un': 21,
'vingt et un': 21,
'trente': 30,
'quarante': 40,
'cinquante': 50,
'soixante': 60,
'soixante-dix': 70,
'soixante dix': 70,
'quatre-vingt': 80,
'quatre vingt': 80,
'quatre-vingt-dix': 90,
'quatre vingt dix': 90
}
magnitude = {
'mille': 1000,
'millier': 1000,
'million': 1000000,
'milliard': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'décillion': 1000000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
ignore = ('et', ',', 'le', 'la', 'l\'')
| 25.730337 | 120 | 0.518777 |
13d1f50c4780862c4c652a5981b36538d82124c5 | 94 | py | Python | fifth/languages.py | pakoito/randomcharacter | 4a3302b5ae157ccebb5e9a7e616315626ad4c614 | [
"MIT"
] | 65 | 2015-02-04T13:50:00.000Z | 2022-03-27T03:32:08.000Z | fifth/languages.py | pakoito/randomcharacter | 4a3302b5ae157ccebb5e9a7e616315626ad4c614 | [
"MIT"
] | 14 | 2015-02-04T04:00:02.000Z | 2021-04-09T13:53:07.000Z | fifth/languages.py | pakoito/randomcharacter | 4a3302b5ae157ccebb5e9a7e616315626ad4c614 | [
"MIT"
] | 39 | 2015-01-28T21:38:09.000Z | 2022-01-04T15:46:47.000Z | LANGUAGES = ['Common', 'Dwarfish', 'Elvish', 'Giant', 'Gnomish', 'Goblin', 'Halfling', 'Orc']
| 47 | 93 | 0.617021 |
b5e53cb8e9b4046acd0bc04b15cc326cf7e18126 | 1,380 | py | Python | src/base_image_preprocessor.py | elangovana/object-tracking | a9359ac3e3926102f9998eb20500746343e14826 | [
"Apache-2.0"
] | 1 | 2019-12-17T01:17:01.000Z | 2019-12-17T01:17:01.000Z | src/base_image_preprocessor.py | elangovana/object-tracking | a9359ac3e3926102f9998eb20500746343e14826 | [
"Apache-2.0"
] | 2 | 2021-09-08T01:37:46.000Z | 2022-03-12T00:13:53.000Z | src/base_image_preprocessor.py | elangovana/object-tracking | a9359ac3e3926102f9998eb20500746343e14826 | [
"Apache-2.0"
] | null | null | null | # *****************************************************************************
# * Copyright 2019 Amazon.com, Inc. and its affiliates. All Rights Reserved. *
# *
# Licensed under the Amazon Software License (the "License"). *
# You may not use this file except in compliance with the License. *
# A copy of the License is located at *
# *
# http://aws.amazon.com/asl/ *
# *
# or in the "license" file accompanying this file. This file is distributed *
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either *
# express or implied. See the License for the specific language governing *
# permissions and limitations under the License. *
# *****************************************************************************
class BaseImagePreprocessor:
def __call__(self, image_path, image_width, image_height, boxes):
"""
:type image_width: object
:returns: Transformed object and the new bounding boxes
"""
raise NotImplementedError
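

# A minimal illustrative subclass (not part of the original repository): it leaves the
# image untouched and only normalizes the pixel-space bounding boxes to [0, 1] using the
# supplied width and height. The class name and the [x_min, y_min, x_max, y_max] box
# layout are assumptions made for this sketch.
class NormalizingImagePreprocessor(BaseImagePreprocessor):
    def __call__(self, image_path, image_width, image_height, boxes):
        normalized_boxes = [
            [x_min / image_width, y_min / image_height,
             x_max / image_width, y_max / image_height]
            for x_min, y_min, x_max, y_max in boxes
        ]
        # Return the (untransformed) image path and the new bounding boxes, matching
        # the contract described by BaseImagePreprocessor.__call__.
        return image_path, normalized_boxes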
| 51.111111 | 79 | 0.427536 |
5b453e30f25fe25f5970714b34f07c8c77d2af31 | 141 | py | Python | sip/tango_control/tango_mysql/_version.py | SKA-ScienceDataProcessor/integration-prototype | 5875dc0489f707232534ce75daf3707f909bcd15 | [
"BSD-3-Clause"
] | 3 | 2016-11-08T02:27:05.000Z | 2018-01-22T13:26:11.000Z | sip/tango_control/tango_mysql/_version.py | SKA-ScienceDataProcessor/integration-prototype | 5875dc0489f707232534ce75daf3707f909bcd15 | [
"BSD-3-Clause"
] | 87 | 2016-11-24T11:09:01.000Z | 2021-03-25T22:23:59.000Z | sip/tango_control/tango_mysql/_version.py | SKA-ScienceDataProcessor/integration-prototype | 5875dc0489f707232534ce75daf3707f909bcd15 | [
"BSD-3-Clause"
] | 10 | 2016-05-18T09:41:36.000Z | 2019-07-04T10:19:24.000Z | # -*- coding: utf-8 -*-
"""SIP Tango MySQL image version."""
__version_info__ = (1, 0, 3)
__version__ = '.'.join(map(str, __version_info__))
| 28.2 | 50 | 0.64539 |
399199d0a16c984571355c3cc9686ab19908ab42 | 3,255 | py | Python | examples/reports/report_core_compay.py | Evergreen2020/zvt | 446a2512d716a38a12164b6d4468a6c9de01b986 | [
"MIT"
] | null | null | null | examples/reports/report_core_compay.py | Evergreen2020/zvt | 446a2512d716a38a12164b6d4468a6c9de01b986 | [
"MIT"
] | null | null | null | examples/reports/report_core_compay.py | Evergreen2020/zvt | 446a2512d716a38a12164b6d4468a6c9de01b986 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import time
import eastmoneypy
from apscheduler.schedulers.background import BackgroundScheduler
from examples.factors.fundamental_selector import FundamentalSelector
from examples.reports import subscriber_emails
from zvdata.api import get_entities
from zvdata.utils.time_utils import now_pd_timestamp, to_time_str
from zvt import init_log
from zvt.domain import Stock, StockTradeDay
from zvt.factors.target_selector import TargetSelector
from zvt.informer.informer import EmailInformer
logger = logging.getLogger(__name__)
sched = BackgroundScheduler()
# Fundamental stock selection: running once a week is enough, since the results barely change
@sched.scheduled_job('cron', hour=16, minute=0, day_of_week='6')
def report_core_company():
while True:
error_count = 0
email_action = EmailInformer()
try:
StockTradeDay.record_data(provider='joinquant')
# Stock.record_data(provider='joinquant')
# FinanceFactor.record_data(provider='eastmoney')
# BalanceSheet.record_data(provider='eastmoney')
latest_day: StockTradeDay = StockTradeDay.query_data(order=StockTradeDay.timestamp.desc(), limit=1,
return_type='domain')
if latest_day:
target_date = latest_day[0].timestamp
else:
target_date = now_pd_timestamp()
my_selector: TargetSelector = FundamentalSelector(start_timestamp='2015-01-01', end_timestamp=target_date)
my_selector.run()
long_targets = my_selector.get_open_long_targets(timestamp=target_date)
if long_targets:
stocks = get_entities(provider='joinquant', entity_schema=Stock, entity_ids=long_targets,
return_type='domain')
# add them to eastmoney
try:
try:
eastmoneypy.del_group('core')
except:
pass
eastmoneypy.create_group('core')
for stock in stocks:
eastmoneypy.add_to_group(stock.code, group_name='core')
except Exception as e:
email_action.send_message("5533061@qq.com", f'report_core_company error',
'report_core_company error:{}'.format(e))
info = [f'{stock.name}({stock.code})' for stock in stocks]
msg = ' '.join(info)
else:
msg = 'no targets'
logger.info(msg)
            email_action.send_message(subscriber_emails, f'{to_time_str(target_date)} core asset stock selection results', msg)
break
except Exception as e:
logger.exception('report_core_company error:{}'.format(e))
time.sleep(60 * 3)
error_count = error_count + 1
if error_count == 10:
email_action.send_message("5533061@qq.com", f'report_core_company error',
'report_core_company error:{}'.format(e))
if __name__ == '__main__':
init_log('report_core_company.log')
report_core_company()
sched.start()
sched._thread.join()
| 36.573034 | 118 | 0.602458 |
4d7fb53af7a25956637c36f4bea09b657292cdef | 262 | py | Python | SatPlot/is_leap_year.py | lff5985/share-from-zhao | b1a6e3513db10e6da18ed6884d4fab9fb68e51b4 | [
"MIT"
] | 2 | 2018-06-13T02:27:22.000Z | 2020-12-27T09:55:50.000Z | SatPlot/is_leap_year.py | lff5985/share-from-zhao | b1a6e3513db10e6da18ed6884d4fab9fb68e51b4 | [
"MIT"
] | null | null | null | SatPlot/is_leap_year.py | lff5985/share-from-zhao | b1a6e3513db10e6da18ed6884d4fab9fb68e51b4 | [
"MIT"
] | 2 | 2016-11-09T14:06:30.000Z | 2019-06-01T02:46:15.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def is_leap_year(year):
if year % 400 == 0:
return True
elif year % 4 == 0:
if year % 100 != 0:
return True
else:
return False
else:
return False
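

# Illustrative checks (not part of the original script): a Gregorian leap year is
# divisible by 4 but not by 100, unless it is also divisible by 400.
if __name__ == '__main__':
    assert is_leap_year(2000) and is_leap_year(2016)
    assert not is_leap_year(1900)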
| 18.714286 | 27 | 0.477099 |
81232ef53d4d84e972c379e78c78b22bf189139e | 3,629 | py | Python | UnPackIps/UnPackIps.py | cober2019/Network-Automation | 796b7760ca1f1e496a841c613eaff05ddba71b16 | [
"MIT"
] | 48 | 2019-08-22T19:42:46.000Z | 2022-03-22T07:05:02.000Z | UnPackIps/UnPackIps.py | muhammad-rafi/Network-Automation | 856b4390250b602e5350706666a33e290ea7afef | [
"MIT"
] | 2 | 2019-09-04T15:42:14.000Z | 2020-05-07T15:21:20.000Z | UnPackIps/UnPackIps.py | muhammad-rafi/Network-Automation | 856b4390250b602e5350706666a33e290ea7afef | [
"MIT"
] | 16 | 2020-01-22T20:21:36.000Z | 2021-12-25T06:08:48.000Z | """Helper functions to deal with IP ranges. Also can be used a ping tool with logging"""
import ipaddress
import subprocess as sp
import logging
import time
logging.basicConfig(filename='ping.log', level=logging.INFO)
def _diagnose(ip, status) -> None:
"""Parse ping results. Print and save to ping.log"""
for i in status[1].split("\n"):
try:
if i.split()[0] == "Reply":
print(
f"{ip} up | Latency: {i.split()[4].split('=')[1]} Time: {time.strftime('%a, %d %b %Y %H:%M:%S +0000')}")
logging.info(
f"\n {ip} up | Latency: {i.split()[4].split('=')[1]} Time: {time.strftime('%a, %d %b %Y %H:%M:%S +0000')}")
            elif i.split()[0] in ("Ping", "Packets", "Approximate", "Minimum"):
                pass
if i.split()[0] == "Request":
                print(
                    f"!!! {ip} Request timed out. | Status: Down | Time: {time.strftime('%a, %d %b %Y %H:%M:%S +0000')} !!!")
                logging.info(
                    f"\n !!! {ip} Request timed out. | Status: Down | Time: {time.strftime('%a, %d %b %Y %H:%M:%S +0000')} !!!")
except IndexError:
pass
def _is_ip(ip):
    """Check whether the given string is a valid IP address."""
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError:
        print(f"Not a valid IP address {ip}")
        return False
def _get_subnets(user_input):
    """Private helper for getting subnet and fourth-octet range info."""
    # Remove spaces to avoid IP address errors
    ips = user_input.replace(" ", "")
    ip_details = []
    for subnet in ips.split('|'):
        octets = subnet.split('.')
        if len(octets) != 4:
            raise ValueError("Expected exactly 4 octets, found: " + ips)
        subnet = '.'.join(octets[0:-1])
        ranges = octets[3]
        ip_details.append({"subnet": subnet, "ranges": ranges})
    return ip_details
def _mixrange(short_hand):
"""Working with splitting on on command and dashes."""
ranges = []
# Taken from https://stackoverflow.com/a/18759797
for item in short_hand.split(','):
if '-' not in item:
ranges.append(int(item))
else:
l, h = map(int, item.split('-'))
ranges += range(l, h + 1)
return ranges
def get_ips(user_input: str = None) -> list:
"""Take in the user input and split twice using parsing methods for '|', ', and ','."""
ip_details = _get_subnets(user_input)
expanded_ip = []
check_overlap = {}
for item in ip_details:
fourth_octets = _mixrange(item['ranges'])
for fourth_octet in fourth_octets:
ip = item['subnet'] + '.' + str(fourth_octet)
            # Verify the IP address and skip it if the check returns False
valid_ip = _is_ip(ip)
if valid_ip is False:
continue
# Check if this IP converted to an integer has been found already, using the dictionary
# to determine if this IP was seen already.
if check_overlap.get(int(ipaddress.IPv4Address(ip))):
raise ValueError("Overlapping IP: " + ip)
check_overlap[int(ipaddress.IPv4Address(ip))] = True
expanded_ip.append(ip)
return expanded_ip
def ping(user_input: str = None):
ips = get_ips(user_input)
for i in ips:
ping_ips = sp.getstatusoutput("ping " + i)
_diagnose(i, ping_ips)
print("\n")
| 32.693694 | 139 | 0.538991 |
1d2d65476b1a9ee624c21bbf299e974dcf5251d4 | 881 | py | Python | Hash Table/561. Array Partition I.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 138 | 2020-02-08T05:25:26.000Z | 2021-11-04T11:59:28.000Z | Hash Table/561. Array Partition I.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | null | null | null | Hash Table/561. Array Partition I.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 24 | 2021-01-02T07:18:43.000Z | 2022-03-20T08:17:54.000Z | """
561. Array Partition I
Given an array of 2n integers, your task is to group these integers into n pairs of integer,
say (a1, b1), (a2, b2), ..., (an, bn) which makes sum of min(ai, bi) for all i from 1 to n as large as possible.
Input: [1,4,3,2]
Output: 4
Explanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).
"""
class Solution:
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
li = [0]*20001
for i in nums:
li[i+10000]+=1
r = res = 0
for i, v in enumerate(li):
            res += (v - r + 1)//2*(i-10000)  # with parity carry r, (v - r + 1)//2 of these values land on picked (even) positions
r = (v + r) % 2
return res
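

# Quick illustrative check of the counting approach above (not part of the original
# solution): for the example in the problem statement the picked minima are 1 and 3.
assert Solution().arrayPairSum([1, 4, 3, 2]) == 4
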
class Solution:
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum((sorted(nums))[::2]) | 23.184211 | 112 | 0.502838 |
23d93e875ff5aba177bdcfef2180c62347420097 | 77 | py | Python | shouqianba/__init__.py | kogwang/mongo- | 2249a5a3363398e799e250be1e93f9aaef143210 | [
"Apache-2.0"
] | null | null | null | shouqianba/__init__.py | kogwang/mongo- | 2249a5a3363398e799e250be1e93f9aaef143210 | [
"Apache-2.0"
] | null | null | null | shouqianba/__init__.py | kogwang/mongo- | 2249a5a3363398e799e250be1e93f9aaef143210 | [
"Apache-2.0"
] | null | null | null | from .client import ShouqianbaClient
from .log import *
from . import config
| 19.25 | 36 | 0.792208 |
4c4b18075d1203ce238cf0655cbeb4ff221e1860 | 88,137 | py | Python | src/sentry/south_migrations/0356_auto__add_useridentity__add_unique_useridentity_user_identity__add_ide.py | apragacz/sf-sentry | 2fdd6c1195c29a1d401d1cd538c22ea68556699a | [
"BSD-3-Clause"
] | 1 | 2018-03-05T15:40:12.000Z | 2018-03-05T15:40:12.000Z | src/sentry/south_migrations/0356_auto__add_useridentity__add_unique_useridentity_user_identity__add_ide.py | pkaminski/sentry | 27e948283e27d93ca5192ca7b580830e092c25c7 | [
"BSD-3-Clause"
] | 1 | 2018-08-22T16:49:48.000Z | 2018-08-22T16:49:48.000Z | src/sentry/south_migrations/0356_auto__add_useridentity__add_unique_useridentity_user_identity__add_ide.py | pkaminski/sentry | 27e948283e27d93ca5192ca7b580830e092c25c7 | [
"BSD-3-Clause"
] | 1 | 2018-07-02T09:46:44.000Z | 2018-07-02T09:46:44.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
# Adding model 'UserIdentity'
db.create_table('sentry_useridentity', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('user', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User'])),
('identity', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Identity'])),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['UserIdentity'])
# Adding unique constraint on 'UserIdentity', fields ['user', 'identity']
db.create_unique('sentry_useridentity', ['user_id', 'identity_id'])
# Adding model 'IdentityProvider'
db.create_table('sentry_identityprovider', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('type', self.gf('django.db.models.fields.CharField')(max_length=64)),
('instance', self.gf('django.db.models.fields.CharField')(max_length=64)),
))
db.send_create_signal('sentry', ['IdentityProvider'])
# Adding unique constraint on 'IdentityProvider', fields ['type', 'instance']
db.create_unique('sentry_identityprovider', ['type', 'instance'])
# Adding model 'Identity'
db.create_table('sentry_identity', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('idp', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.IdentityProvider'])),
('external_id', self.gf('django.db.models.fields.CharField')(max_length=64)),
('data', self.gf('sentry.db.models.fields.encrypted.EncryptedJsonField')(default={})),
('status', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0)),
('scopes', self.gf('sentry.db.models.fields.array.ArrayField')(
of=('django.db.models.fields.TextField', [], {}))),
('date_verified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['Identity'])
# Adding unique constraint on 'Identity', fields ['idp', 'external_id']
db.create_unique('sentry_identity', ['idp_id', 'external_id'])
def backwards(self, orm):
# Removing unique constraint on 'Identity', fields ['idp', 'external_id']
db.delete_unique('sentry_identity', ['idp_id', 'external_id'])
# Removing unique constraint on 'IdentityProvider', fields ['type', 'instance']
db.delete_unique('sentry_identityprovider', ['type', 'instance'])
# Removing unique constraint on 'UserIdentity', fields ['user', 'identity']
db.delete_unique('sentry_useridentity', ['user_id', 'identity_id'])
# Deleting model 'UserIdentity'
db.delete_table('sentry_useridentity')
# Deleting model 'IdentityProvider'
db.delete_table('sentry_identityprovider')
# Deleting model 'Identity'
db.delete_table('sentry_identity')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'268164e6fcdc4d8b85fea2791dccbd3a9c164caa4d11488fb3b1ccf0180d4f02'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'590b6318a3b34f08b9d50699aa45af263495967b739f47bb8bdaac2235cc3df0'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Maximum Monitor'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'73fe50092c474375a24b2e3cc0076923'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 22, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 10, 22, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'5ec7a02df94d4a85a46d81e1cca5050729f6fc90de174d798bf0b16da58a1dc9'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'26313a1b1efd4785a885935a1c79424adb26b8f7a7bd4758bdaa8240a36c4709'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 29, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'),)", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'instance'),)", 'object_name': 'IdentityProvider'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 10, 22, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'6534d8da9670456db4c154230fa948f9'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'Vd6RY3QZ3pur23ueng3ZJdZbKT1y3VaJ'", 'max_length': '32'})
},
'sentry.useridentity': {
'Meta': {'unique_together': "(('user', 'identity'),)", 'object_name': 'UserIdentity'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'identity': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Identity']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
| 89.388438 | 233 | 0.591091 |
9a3a27dd7e173a560bbe1090579941be81b87f3f | 17,640 | py | Python | myven/lib/python3.8/site-packages/ansible/modules/cloud/amazon/redshift.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2021-04-02T08:08:39.000Z | 2021-04-02T08:08:39.000Z | myven/lib/python3.8/site-packages/ansible/modules/cloud/amazon/redshift.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | null | null | null | myven/lib/python3.8/site-packages/ansible/modules/cloud/amazon/redshift.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2020-05-03T01:13:16.000Z | 2020-05-03T01:13:16.000Z | #!/usr/bin/python
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift
version_added: "2.2"
short_description: create, delete, or modify an Amazon Redshift instance
description:
  - Creates, deletes, or modifies Amazon Redshift cluster instances.
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'create', 'facts', 'delete', 'modify' ]
identifier:
description:
- Redshift cluster identifier.
required: true
node_type:
description:
- The node type of the cluster. Must be specified when command=create.
choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dc2.large', 'dc2.8xlarge',
'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
username:
description:
- Master database username. Used only when command=create.
password:
description:
- Master database password. Used only when command=create.
cluster_type:
description:
- The type of cluster.
choices: ['multi-node', 'single-node' ]
default: 'single-node'
db_name:
description:
- Name of the database.
default: null
availability_zone:
description:
- availability zone in which to launch cluster
aliases: ['zone', 'aws_zone']
number_of_nodes:
description:
- Number of nodes. Only used when cluster_type=multi-node.
default: null
cluster_subnet_group_name:
description:
      - which cluster subnet group to place the cluster in
aliases: ['subnet']
cluster_security_groups:
description:
      - the security group(s) to which the cluster belongs
default: null
aliases: ['security_groups']
vpc_security_group_ids:
description:
- VPC security group
aliases: ['vpc_security_groups']
default: null
skip_final_cluster_snapshot:
description:
- skip a final snapshot before deleting the cluster. Used only when command=delete.
aliases: ['skip_final_snapshot']
default: false
version_added: "2.4"
final_cluster_snapshot_identifier:
description:
      - identifier of the final snapshot to be created before deleting the cluster. If this parameter is provided,
        skip_final_cluster_snapshot must be false. Used only when command=delete.
aliases: ['final_snapshot_id']
default: null
version_added: "2.4"
preferred_maintenance_window:
description:
- maintenance window
aliases: ['maintance_window', 'maint_window']
default: null
cluster_parameter_group_name:
description:
- name of the cluster parameter group
aliases: ['param_group_name']
default: null
automated_snapshot_retention_period:
description:
      - number of days that automated snapshots are retained
aliases: ['retention_period']
default: null
port:
description:
      - which port the cluster is listening on
default: null
cluster_version:
description:
- which version the cluster should have
aliases: ['version']
choices: ['1.0']
default: null
allow_version_upgrade:
description:
      - flag to determine whether version upgrades are allowed
aliases: ['version_upgrade']
default: true
publicly_accessible:
description:
      - whether the cluster is publicly accessible
default: false
encrypted:
description:
      - whether the cluster is encrypted
default: false
elastic_ip:
description:
      - the elastic IP address to associate with the cluster, if any
default: null
new_cluster_identifier:
description:
- Only used when command=modify.
aliases: ['new_identifier']
default: null
wait:
description:
      - When command=create or modify, wait for the cluster to enter the 'available' state. When command=delete, wait for the cluster to be
        terminated.
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements: [ 'boto' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic cluster provisioning example
- redshift: >
command=create
node_type=ds1.xlarge
identifier=new_cluster
username=cluster_admin
password=1nsecure
# Cluster delete example
- redshift:
command: delete
identifier: new_cluster
skip_final_cluster_snapshot: true
wait: true
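# Cluster modify example (illustrative sketch only; the identifier values shown are hypothetical)
- redshift:
    command: modify
    identifier: new_cluster
    new_cluster_identifier: renamed_cluster
    wait: true
    wait_timeout: 600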
'''
RETURN = '''
cluster:
description: dictionary containing all the cluster information
returned: success
type: complex
contains:
identifier:
description: Id of the cluster.
returned: success
type: string
sample: "new_redshift_cluster"
create_time:
description: Time of the cluster creation as timestamp.
returned: success
type: float
sample: 1430158536.308
status:
          description: Status of the cluster.
returned: success
type: string
sample: "available"
db_name:
description: Name of the database.
returned: success
type: string
sample: "new_db_name"
availability_zone:
description: Amazon availability zone where the cluster is located.
returned: success
type: string
sample: "us-east-1b"
maintenance_window:
          description: Time frame when maintenance/upgrades are done.
returned: success
type: string
sample: "sun:09:30-sun:10:00"
private_ip_address:
description: Private IP address of the main node.
returned: success
type: string
sample: "10.10.10.10"
public_ip_address:
description: Public IP address of the main node.
returned: success
type: string
sample: "0.0.0.0"
port:
description: Port of the cluster.
returned: success
type: int
sample: 5439
url:
description: FQDN of the main cluster node.
returned: success
type: string
sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
'''
import time
try:
import boto.exception
import boto.redshift
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
'identifier': resource['ClusterIdentifier'],
'create_time': resource['ClusterCreateTime'],
'status': resource['ClusterStatus'],
'username': resource['MasterUsername'],
'db_name': resource['DBName'],
'availability_zone': resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
'url': resource['Endpoint']['Address'],
'port': resource['Endpoint']['Port']
}
for node in resource['ClusterNodes']:
if node['NodeRole'] in ('SHARED', 'LEADER'):
facts['private_ip_address'] = node['PrivateIPAddress']
facts['public_ip_address'] = node['PublicIPAddress']
break
return facts
def create_cluster(module, redshift):
"""
Create a new cluster
module: AnsibleModule object
redshift: authenticated redshift connection object
    Returns: a tuple of (changed, cluster facts dict)
"""
identifier = module.params.get('identifier')
node_type = module.params.get('node_type')
username = module.params.get('username')
password = module.params.get('password')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
changed = True
# Package up the optional parameters
params = {}
for p in ('db_name', 'cluster_type', 'cluster_security_groups',
'vpc_security_group_ids', 'cluster_subnet_group_name',
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
'automated_snapshot_retention_period', 'port',
'cluster_version', 'allow_version_upgrade',
'number_of_nodes', 'publicly_accessible',
'encrypted', 'elastic_ip', 'enhanced_vpc_routing'):
if p in module.params:
params[p] = module.params.get(p)
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
changed = False
except boto.exception.JSONResponseError as e:
try:
redshift.create_cluster(identifier, node_type, username, password, **params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                # re-describe the cluster so the loop can observe the status change
                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
            if wait_timeout <= time.time():
                module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])
            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(changed, _collect_facts(resource))
def describe_cluster(module, redshift):
"""
Collect data about the cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(True, _collect_facts(resource))
def delete_cluster(module, redshift):
"""
Delete a cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
try:
redshift.delete_cluster(
identifier,
skip_final_cluster_snapshot,
final_cluster_snapshot_identifier
)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
            while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
                time.sleep(5)
                # re-describe the cluster so the loop can observe the status change
                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
            if wait_timeout <= time.time():
                module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])
            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(True, {})
def modify_cluster(module, redshift):
"""
Modify an existing cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Package up the optional parameters
params = {}
for p in ('cluster_type', 'cluster_security_groups',
'vpc_security_group_ids', 'cluster_subnet_group_name',
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
'automated_snapshot_retention_period', 'port', 'cluster_version',
'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier',
'enhanced_vpc_routing'):
if p in module.params:
params[p] = module.params.get(p)
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
try:
redshift.modify_cluster(identifier, **params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                # re-describe the cluster so the loop can observe the status change
                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
            if wait_timeout <= time.time():
                module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])
            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
# https://github.com/boto/boto/issues/2776 is fixed.
module.fail_json(msg=str(e))
return(True, _collect_facts(resource))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier=dict(required=True),
node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
'dc2.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
'dw2.8xlarge'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
        db_name=dict(required=False),
cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
cluster_security_groups=dict(aliases=['security_groups'], type='list'),
vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False),
final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
cluster_subnet_group_name=dict(aliases=['subnet']),
availability_zone=dict(aliases=['aws_zone', 'zone']),
preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
cluster_parameter_group_name=dict(aliases=['param_group_name']),
automated_snapshot_retention_period=dict(aliases=['retention_period']),
port=dict(type='int'),
cluster_version=dict(aliases=['version'], choices=['1.0']),
allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
number_of_nodes=dict(type='int'),
publicly_accessible=dict(type='bool', default=False),
encrypted=dict(type='bool', default=False),
elastic_ip=dict(required=False),
new_cluster_identifier=dict(aliases=['new_identifier']),
enhanced_vpc_routing=dict(type='bool', default=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
))
required_if = [
('command', 'delete', ['skip_final_cluster_snapshot']),
('skip_final_cluster_snapshot', False, ['final_cluster_snapshot_identifier'])
]
module = AnsibleModule(
argument_spec=argument_spec,
required_if=required_if
)
if not HAS_BOTO:
module.fail_json(msg='boto v2.9.0+ required for this module')
command = module.params.get('command')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
# connect to the rds endpoint
try:
conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
changed = True
if command == 'create':
(changed, cluster) = create_cluster(module, conn)
elif command == 'facts':
(changed, cluster) = describe_cluster(module, conn)
elif command == 'delete':
(changed, cluster) = delete_cluster(module, conn)
elif command == 'modify':
(changed, cluster) = modify_cluster(module, conn)
module.exit_json(changed=changed, cluster=cluster)
if __name__ == '__main__':
main()
| 34.186047 | 153 | 0.655782 |
d8e947f98717b74d5ac3f81a751cba07b4c74f3d | 1,081 | py | Python | lineage/figures/figure12.py | meyer-lab/tHMM | 22e51a2035e76c39a2b1355d1e44ad9ed977dfd4 | [
"MIT"
] | 1 | 2022-03-17T21:05:23.000Z | 2022-03-17T21:05:23.000Z | lineage/figures/figure12.py | meyer-lab/tHMM | 22e51a2035e76c39a2b1355d1e44ad9ed977dfd4 | [
"MIT"
] | 310 | 2020-07-08T14:14:08.000Z | 2022-03-23T18:04:57.000Z | lineage/figures/figure12.py | meyer-lab/tHMM | 22e51a2035e76c39a2b1355d1e44ad9ed977dfd4 | [
"MIT"
] | 1 | 2020-12-21T04:54:56.000Z | 2020-12-21T04:54:56.000Z | """ This file depicts the distribution of phase lengths versus the states for each concentration of gemcitabine. """
import pickle
from string import ascii_lowercase
from .figureCommon import getSetup, subplotLabel, plot_all
from ..plotTree import plot_networkx
concs = ["control", "gemcitabine 5 nM", "gemcitabine 10 nM", "gemcitabine 30 nM"]
concsValues = ["control", "5 nM", "10 nM", "30 nM"]
gemc_tHMMobj_list = []
with open("gemcitabines.pkl", "rb") as pik1:
    for _ in range(4):
        gemc_tHMMobj_list.append(pickle.load(pik1))
T_gem = gemc_tHMMobj_list[0].estimate.T
num_states = gemc_tHMMobj_list[0].num_states
def makeFigure():
""" Makes figure 12. """
ax, f = getSetup((17, 7.5), (2, 7))
plot_all(ax, num_states, gemc_tHMMobj_list, "Gemcitabine", concs, concsValues)
for i in range(3, 7):
ax[i].set_title(concs[i - 3], fontsize=16)
ax[i].text(-0.2, 1.25, ascii_lowercase[i - 2], transform=ax[i].transAxes, fontsize=16, fontweight="bold", va="top")
ax[i].axis('off')
return f
# plot_networkx(T_gem.shape[0], T_gem, 'gemcitabine')
| 33.78125 | 123 | 0.691027 |
e3718eec76a6057b2bfc858af653e6a142ffa1dd | 8,731 | py | Python | usaspending_api/etl/award_helpers.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | 1 | 2021-06-17T05:09:00.000Z | 2021-06-17T05:09:00.000Z | usaspending_api/etl/award_helpers.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | usaspending_api/etl/award_helpers.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | from django.db import connection
from usaspending_api.awards.models import Transaction
def update_awards(award_tuple=None):
"""
Awards can have one or more transactions. We maintain some information
on the award model that needs to be updated as its child transactions
change. For example, an award's total obligated amount represents the
    sum of its transactions' obligated amounts. Another example is a
series of fields (award type, awarding agency, etc.) that will always
be set to the value of the Award's most recent transaction.
This function keeps those awards fields synced with child transactions.
Obviously the raw SQL is not ideal. That said, the complex update
of award fields based on the earliest, latest, and aggregate values
of the child transactions was problematic to do in a set-based way
via the ORM. These updates do need to be set-based, as looping through
and updating individual award records would be an ETL bottleneck.
"""
# common table expression for each award's latest transaction
sql_txn_latest = (
'txn_latest AS ('
'SELECT DISTINCT ON (award_id) * '
'FROM transaction ')
if award_tuple:
sql_txn_latest += 'WHERE award_id IN %s '
sql_txn_latest += 'ORDER BY award_id, action_date DESC) '
# common table expression for each award's earliest transaction
sql_txn_earliest = (
'txn_earliest AS ('
'SELECT DISTINCT ON (award_id) * '
'FROM transaction ')
if award_tuple:
sql_txn_earliest += 'WHERE award_id IN %s '
sql_txn_earliest += 'ORDER BY award_id, action_date) '
# common table expression for each award's summarized data
    # (currently the only field we summarize is federal_action_obligation,
    # but we can add more as necessary)
sql_txn_totals = (
'txn_totals AS ('
'SELECT award_id, SUM(federal_action_obligation) AS total_obligation '
'FROM transaction ')
if award_tuple:
sql_txn_totals += 'WHERE award_id IN %s '
sql_txn_totals += 'GROUP BY award_id) '
# construct a sql query that uses the common table expressions
    # defined above and joins each of them to their corresponding
    # award. The joined data from earliest, latest, and summarized
# transactions are used to update awards fields as appropriate
sql_update = 'WITH {}, {}, {}'.format(sql_txn_latest, sql_txn_earliest, sql_txn_totals)
sql_update += (
'UPDATE awards a '
'SET awarding_agency_id = l.awarding_agency_id, '
'certified_date = l.certified_date, '
'data_source = l.data_source, '
'date_signed = e.action_date, '
'description = e.description, '
'funding_agency_id = l.funding_agency_id, '
'last_modified_date = l.last_modified_date, '
'period_of_performance_current_end_date = l.period_of_performance_current_end_date, '
'period_of_performance_start_date = e.period_of_performance_start_date, '
'place_of_performance_id = l.place_of_performance_id, '
'recipient_id = l.recipient_id, '
'total_obligation = t.total_obligation, '
'latest_transaction_id = l.id, '
'type = l.type, '
'type_description = l.type_description '
'FROM txn_earliest e '
'JOIN txn_latest l '
'ON e.award_id = l.award_id '
'JOIN txn_totals t '
'ON l.award_id = t.award_id '
'WHERE t.award_id = a.id'
)
with connection.cursor() as cursor:
# If another expression is added and includes %s, you must add the tuple
# for that string interpolation to this list (even if it uses the same one!)
cursor.execute(sql_update, [award_tuple, award_tuple, award_tuple])
rows = cursor.rowcount
return rows
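# Illustrative usage sketch (not part of the original module): after a batch of new
# transactions is loaded, these helpers are typically chained with a tuple of the
# affected award ids so the raw SQL above only touches those rows. The variable
# names below are hypothetical.
#
#     award_ids = tuple(Transaction.objects.values_list('award_id', flat=True).distinct())
#     update_awards(award_tuple=award_ids)
#     update_contract_awards(award_tuple=award_ids)
#     update_award_subawards(award_tuple=award_ids)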
def update_contract_awards(award_tuple=None):
"""Update contract-specific award data based on the info in child transactions."""
# sum the potential_total_value_of_award from contract_data for an award
sql_txn_totals = (
'txn_totals AS ('
'SELECT tx.award_id, SUM(potential_total_value_of_award) AS total_potential_award '
'FROM transaction_contract INNER JOIN transaction as tx on '
'transaction_contract.transaction_id = tx.id ')
if award_tuple:
sql_txn_totals += 'WHERE tx.award_id IN %s '
sql_txn_totals += 'GROUP BY tx.award_id) '
# construct a sql query that uses the latest txn contract common table
    # expression above and joins it to the corresponding
    # award. That joined data is used to update awards fields as appropriate
    # (currently, there's only one transaction_contract field that trickles
# up and updates an award record: potential_total_value_of_award)
sql_update = 'WITH {}'.format(sql_txn_totals)
sql_update += (
'UPDATE awards a '
'SET potential_total_value_of_award = t.total_potential_award '
'FROM txn_totals t '
'WHERE t.award_id = a.id'
)
with connection.cursor() as cursor:
# If another expression is added and includes %s, you must add the tuple
# for that string interpolation to this list (even if it uses the same one!)
cursor.execute(sql_update, [award_tuple])
rows = cursor.rowcount
return rows
def update_award_subawards(award_tuple=None):
"""
Updates awards' subaward counts and totals
"""
# Sum and count subaward_amounts
sql_sub_totals = (
'subaward_totals AS ('
'SELECT award_id, SUM(amount) AS total_subaward_amount, COUNT(*) AS subaward_count '
'FROM awards_subaward ')
if award_tuple:
sql_sub_totals += 'WHERE award_id IN %s '
sql_sub_totals += 'GROUP BY award_id) '
# Construct the SQL update
sql_update = 'WITH {}'.format(sql_sub_totals)
sql_update += (
'UPDATE awards '
'SET total_subaward_amount = subaward_totals.total_subaward_amount, '
'subaward_count = subaward_totals.subaward_count '
'FROM subaward_totals '
'WHERE subaward_totals.award_id = id'
)
with connection.cursor() as cursor:
# If another expression is added and includes %s, you must add the tuple
# for that string interpolation to this list (even if it uses the same one!)
cursor.execute(sql_update, [award_tuple])
rows = cursor.rowcount
return rows
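# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes an
# `Award` model backs the awards table and that these helpers are invoked from
# e.g. a management command; psycopg2 renders the plain tuple of ids as the
# parenthesised list required by the `IN %s` clauses above.
#
#   award_ids = tuple(Award.objects.values_list('id', flat=True))
#   if award_ids:
#       total = update_contract_awards(award_tuple=award_ids)
#       total += update_award_subawards(award_tuple=award_ids)
#       print('awards refreshed from child transactions: {}'.format(total))
# ---------------------------------------------------------------------------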
def get_award_financial_transaction(
toptier_agency_cgac, piid=None, parent_award_id=None, fain=None, uri=None):
"""
For specified award financial (aka "File C") data, try to find a matching
transaction (aka "File D"). We sometimes need to do this because File C
doesn't always have the level of award/transaction specificity that we
want, so we try to find a matching File D record to grab the additional
information.
For example, when trying to match award financial information to an
award record, we need the awarding subtier agency, which isn't supplied
on File C. Thus, we'll use this function to find a File D record and
use the subtier agency information supplied there.
    If we find more than one match, return the record with the most
    recent action date.
Args:
toptier_agency_cgac: top tier agency code (aka CGAC code) from File C
piid: piid from File C (contract awards only)
parent_award_id: parent award id from File C (contract awards only)
fain: fain from File C (assistance awards only)
uri: uri from File C (assistance awards only)
Returns:
A Transaction model instance
"""
    # if both fain and uri are supplied as parameters, look up by fain first
incoming_fain = fain
incoming_uri = uri
if incoming_fain is not None and incoming_uri is not None:
uri = None
txn = Transaction.objects.filter(
awarding_agency__toptier_agency__cgac_code=toptier_agency_cgac,
contract_data__piid=piid,
contract_data__parent_award_id=parent_award_id,
assistance_data__fain=fain,
assistance_data__uri=uri) \
.order_by('-action_date').first()
if txn is None and incoming_fain is not None and incoming_uri is not None:
# we didn't find a match and both fain and uri were supplied
# as parameters, now try searching by uri
uri = incoming_uri
txn = Transaction.objects.filter(
awarding_agency__toptier_agency__cgac_code=toptier_agency_cgac,
contract_data__piid=piid,
contract_data__parent_award_id=parent_award_id,
assistance_data__fain=None,
assistance_data__uri=uri) \
.order_by('-action_date').first()
return txn
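# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The literal
# CGAC/fain/uri values are hypothetical File C fields, used only to show the
# fain-first / uri-fallback behaviour documented above.
#
#   txn = get_award_financial_transaction(
#       toptier_agency_cgac='012', fain='12FA00PY52375', uri='ABC-123')
#   if txn is not None:
#       awarding_agency = txn.awarding_agency  # carries the subtier agency info
# ---------------------------------------------------------------------------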
| 41.379147 | 93 | 0.689268 |
248d7ff3528a2c941d2ccd7d74449d95ccb76e4d | 955 | py | Python | my_python_module/backup/longest_common_subsequence.py | a358003542/wanze_python_project | db52515af80319000e9a47a7b02f3ccd2cf46afd | ["MIT"] | 1 | 2020-10-30T08:54:22.000Z | 2020-10-30T08:54:22.000Z | my_python_module/backup/longest_common_subsequence.py | a358003542/wanze_python_project | db52515af80319000e9a47a7b02f3ccd2cf46afd | ["MIT"] | null | null | null | my_python_module/backup/longest_common_subsequence.py | a358003542/wanze_python_project | db52515af80319000e9a47a7b02f3ccd2cf46afd | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*-coding:utf-8-*-
"""
Dynamic-programming solution to the longest common subsequence (LCS) problem, a classic and very practical example.
The subsequence does not have to be increasing; this is mostly used to compare the similarity of two sequences.
"""
import pandas as pd
def longest_common_subsequence(seq_one, seq_two):
df = pd.DataFrame(index=[item for item in seq_one], columns=[item for item in seq_two])
df = df.fillna(0)
for i, c1 in enumerate(seq_one):
for j, c2 in enumerate(seq_two):
            if c1 == c2:
                # characters match: extend the LCS ending at the diagonal cell
                if (i - 1 < 0) or (j - 1 < 0):
                    df.iloc[i, j] = 1
                else:
                    df.iloc[i, j] = df.iloc[i - 1, j - 1] + 1
            else:
                # no match: carry over the best neighbouring LCS length
                if i < 1 and j < 1:
                    df.iloc[i, j] = 0
                elif i < 1:
                    df.iloc[i, j] = max(0, df.iloc[i, j - 1])
                elif j < 1:
                    df.iloc[i, j] = max(df.iloc[i - 1, j], 0)
                else:
                    df.iloc[i, j] = max(df.iloc[i - 1, j], df.iloc[i, j - 1])
    print(df)
    # the bottom-right cell of the DP table holds the LCS length
    return df.iloc[-1, -1]
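# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module):
#
#   if __name__ == '__main__':
#       # classic textbook example: the LCS of these two strings has length 4
#       print(longest_common_subsequence('ABCBDAB', 'BDCABA'))
# ---------------------------------------------------------------------------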
| 24.487179 | 91 | 0.436649 |
1c3365e3b522bc4f34c72ef66f784cc0cc49dbf1 | 10,574 | py | Python | tools/harness-automation/autothreadharness/open_thread_controller.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | ["BSD-3-Clause"] | 1 | 2020-08-12T06:15:53.000Z | 2020-08-12T06:15:53.000Z | tools/harness-automation/autothreadharness/open_thread_controller.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | ["BSD-3-Clause"] | null | null | null | tools/harness-automation/autothreadharness/open_thread_controller.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import re
import socket
import threading
import time
import serial
from . import settings
__all__ = ['OpenThreadController']
logger = logging.getLogger(__name__)
linesepx = re.compile(r'\r\n|\n')
class OpenThreadController(threading.Thread):
"""This is an simple wrapper to communicate with openthread"""
_lock = threading.Lock()
viewing = False
def __init__(self, port, log=False):
"""Initialize the controller
Args:
port (str): serial port's path or name(windows)
"""
super(OpenThreadController, self).__init__()
self.port = port
self.handle = None
self.lines = []
self._log = log
self._is_net = False
self._init()
def _init(self):
self._connect()
if not self._log:
return
self.start()
def __del__(self):
self.close()
def close(self):
if self.is_alive():
self.viewing = False
self.join()
self._close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _close(self):
if self.handle:
self.handle.close()
self.handle = None
def _connect(self):
logger.debug('My port is %s', self.port)
if self.port.startswith('NET'):
portnum = settings.SER2NET_PORTBASE + int(self.port.split('NET')[1])
logger.debug('My port num is %d', portnum)
address = (settings.SER2NET_HOSTNAME, portnum)
self.handle = socket.create_connection(address)
self.handle.setblocking(0)
self._is_net = True
elif ':' in self.port:
host, port = self.port.split(':')
self.handle = socket.create_connection((host, port))
self.handle.setblocking(0)
self._is_net = True
else:
self.handle = serial.Serial(self.port, 115200, timeout=0, xonxoff=True)
self._is_net = False
def _read(self, size=512):
if self._is_net:
return self.handle.recv(size)
else:
return self.handle.read(size)
def _write(self, data):
if self._is_net:
self.handle.sendall(data)
else:
self.handle.write(data)
def _expect(self, expected, times=50):
"""Find the `expected` line within `times` trials.
Args:
expected str: the expected string
times int: number of trials
"""
logger.debug('[%s] Expecting [%s]', self.port, expected)
retry_times = 10
while times:
if not retry_times:
break
line = self._readline()
if line == expected:
return
if not line:
retry_times -= 1
time.sleep(0.1)
times -= 1
raise Exception('failed to find expected string[%s]' % expected)
def _readline(self):
"""Read exactly one line from the device, nonblocking.
Returns:
None on no data
"""
if len(self.lines) > 1:
return self.lines.pop(0)
tail = ''
if len(self.lines):
tail = self.lines.pop()
try:
tail += self._read()
except socket.error:
logging.exception('No new data')
time.sleep(0.1)
self.lines += linesepx.split(tail)
if len(self.lines) > 1:
return self.lines.pop(0)
def _sendline(self, line):
"""Send exactly one line to the device
Args:
line str: data send to device
"""
self.lines = []
try:
self._read()
except socket.error:
logging.debug('Nothing cleared')
logger.debug('sending [%s]', line)
self._write(line + '\r\n')
# wait for write to complete
time.sleep(0.5)
def _req(self, req):
"""Send command and wait for response.
        The command will be repeated at most 3 times in case of data loss on the serial port.
Args:
req (str): Command to send, please do not include new line in the end.
Returns:
[str]: The output lines
"""
logger.debug('DUT> %s', req)
self._log and self.pause()
times = 3
res = None
while times:
times = times - 1
try:
self._sendline(req)
self._expect(req)
line = None
res = []
while True:
line = self._readline()
logger.debug('Got line %s', line)
if line == 'Done':
break
if line:
res.append(line)
break
except BaseException:
logger.exception('Failed to send command')
self.close()
self._init()
self._log and self.resume()
return res
def run(self):
"""Threading callback"""
self.viewing = True
while self.viewing and self._lock.acquire():
try:
line = self._readline()
except BaseException:
pass
else:
logger.info(line)
self._lock.release()
time.sleep(0)
def is_started(self):
"""check if openthread is started
Returns:
bool: started or not
"""
state = self._req('state')[0]
return state != 'disabled'
def start(self):
"""Start openthread
"""
self._req('ifconfig up')
self._req('thread start')
def stop(self):
"""Stop openthread
"""
self._req('thread stop')
self._req('ifconfig down')
def reset(self):
"""Reset openthread device, not equivalent to stop and start
"""
logger.debug('DUT> reset')
self._log and self.pause()
self._sendline('reset')
self._read()
self._log and self.resume()
def resume(self):
"""Start dumping logs"""
self._lock.release()
def pause(self):
"""Start dumping logs"""
self._lock.acquire()
@property
def networkname(self):
"""str: Thread network name."""
return self._req('networkname')[0]
@networkname.setter
def networkname(self, value):
self._req('networkname %s' % value)
@property
def mode(self):
"""str: Thread mode."""
return self._req('mode')[0]
@mode.setter
def mode(self, value):
self._req('mode %s' % value)
@property
def mac(self):
"""str: MAC address of the device"""
return self._req('extaddr')[0]
@property
def addrs(self):
"""[str]: IP addresses of the devices"""
return self._req('ipaddr')
@property
def short_addr(self):
"""str: Short address"""
return self._req('rloc16')[0]
@property
def channel(self):
"""int: Channel number of openthread"""
return int(self._req('channel')[0])
@channel.setter
def channel(self, value):
self._req('channel %d' % value)
@property
def panid(self):
"""str: Thread panid"""
return self._req('panid')[0]
@panid.setter
def panid(self, value):
self._req('panid %s' % value)
@property
def extpanid(self):
"""str: Thread extpanid"""
return self._req('extpanid')[0]
@extpanid.setter
def extpanid(self, value):
self._req('extpanid %s' % value)
@property
def child_timeout(self):
"""str: Thread child timeout in seconds"""
return self._req('childtimeout')[0]
@child_timeout.setter
def child_timeout(self, value):
self._req('childtimeout %d' % value)
@property
def version(self):
"""str: Open thread version"""
return self._req('version')[0]
def add_prefix(self, prefix, flags, prf):
"""Add network prefix.
Args:
prefix (str): network prefix.
flags (str): network prefix flags, please refer thread documentation for details
prf (str): network prf, please refer thread documentation for details
"""
self._req('prefix add %s %s %s' % (prefix, flags, prf))
time.sleep(1)
self._req('netdataregister')
def remove_prefix(self, prefix):
"""Remove network prefix.
"""
self._req('prefix remove %s' % prefix)
time.sleep(1)
self._req('netdataregister')
def enable_blacklist(self):
"""Enable blacklist feature"""
self._req('blacklist enable')
def add_blacklist(self, mac):
"""Add a mac address to blacklist"""
self._req('blacklist add %s' % mac)
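# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The serial port
# name is a hypothetical example ('COMx' on Windows, 'NET<n>' for ser2net).
#
#   with OpenThreadController('/dev/ttyUSB0') as dut:
#       dut.channel = 11
#       dut.panid = '0xface'
#       dut.start()
#       print(dut.addrs)
#       dut.stop()
# ---------------------------------------------------------------------------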
| 27.322997 | 92 | 0.56847 |
3f47d072719757aeca4ddb311581726820c95a14 | 3,304 | py | Python | hubspot/cms/blogs/tags/models/forward_paging.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | ["Apache-2.0"] | null | null | null | hubspot/cms/blogs/tags/models/forward_paging.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | ["Apache-2.0"] | null | null | null | hubspot/cms/blogs/tags/models/forward_paging.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Blog Post endpoints
\"Use these endpoints for interacting with Blog Posts, Blog Authors, and Blog Tags\" # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.cms.blogs.tags.configuration import Configuration
class ForwardPaging(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"next": "NextPage"}
attribute_map = {"next": "next"}
def __init__(self, next=None, local_vars_configuration=None): # noqa: E501
"""ForwardPaging - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._next = None
self.discriminator = None
if next is not None:
self.next = next
@property
def next(self):
"""Gets the next of this ForwardPaging. # noqa: E501
:return: The next of this ForwardPaging. # noqa: E501
:rtype: NextPage
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this ForwardPaging.
:param next: The next of this ForwardPaging. # noqa: E501
:type: NextPage
"""
self._next = next
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ForwardPaging):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ForwardPaging):
return True
return self.to_dict() != other.to_dict()
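# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated module):
#
#   paging = ForwardPaging(next=None)
#   print(paging.to_dict())                      # -> {'next': None}
#   print(paging == ForwardPaging(next=None))    # -> True
# ---------------------------------------------------------------------------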
| 27.764706 | 102 | 0.562349 |
69a5da6e8a59d2d7b68cc021f273fcfb5de04a33 | 4,685 | py | Python | AI.py | sourabhjo7/ZEST.IO | bb53ef98c39b76a5fae14914b395fd0b8fffc999 | ["MIT"] | 2 | 2021-05-07T16:55:20.000Z | 2021-06-28T10:18:23.000Z | AI.py | sourabhjo7/ZEST.IO | bb53ef98c39b76a5fae14914b395fd0b8fffc999 | ["MIT"] | null | null | null | AI.py | sourabhjo7/ZEST.IO | bb53ef98c39b76a5fae14914b395fd0b8fffc999 | ["MIT"] | null | null | null |
import cv2
import numpy as np
import math
global cap
faceCascade = cv2.CascadeClassifier('Face.xml')
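# duke() estimates how close the player's face is from its bounding-box area:
# between 20,000 and 30,000 px it draws a 'Go Back!' warning and returns 0,
# above 30,000 px it draws 'Penalty+1' and returns 1; smaller faces produce no
# feedback.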
def duke(x,y,w,h,frame):
area=w*h
font = cv2.FONT_HERSHEY_SIMPLEX
if area>20000 and area<30000:
cv2.putText(frame, 'Go Back!', (0, 450), font, 2, (255, 0, 0), 3, cv2.LINE_AA)
return 0
if area>30000:
cv2.putText(frame, 'Penalty+1', (0, 450), font, 2, (255, 0, 0), 3, cv2.LINE_AA)
    return 1
  # face far enough away: no warning and no penalty
  return 0
def update(frame):
su=0
flag=False
frame = cv2.flip(frame, 1)
try:
cv2.line(frame, (350, 478), (350, 0), (0, 0, 255), 2)
cv2.line(frame, (550, 478), (550, 0), (0, 0, 255), 2)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
for (x1, y1, w1, h1) in faces:
cv2.rectangle(frame, (x1, y1), (x1 + w1, y1 + h1), (255, 0,0), 2)
flag=duke(x1, y1, w1, h1, frame)
if x1 <350:
su=6
if x1+w1>550:
su=7
kernel = np.ones((3, 3), np.uint8)
x=100
y=100
h=200
w=200
roi = frame[y:y+h, x:x+w]
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
lower_skin = np.array([0, 20, 70], dtype=np.uint8)
upper_skin = np.array([20, 255, 255], dtype=np.uint8)
mask = cv2.inRange(hsv, lower_skin, upper_skin)
mask = cv2.dilate(mask, kernel, iterations=4)
mask = cv2.GaussianBlur(mask, (5, 5), 100)
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key=lambda x: cv2.contourArea(x))
epsilon = 0.0005 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
hull = cv2.convexHull(cnt)
areahull = cv2.contourArea(hull)
areacnt = cv2.contourArea(cnt)
arearatio = ((areahull - areacnt) / areacnt) * 100
hull = cv2.convexHull(approx, returnPoints=False)
defects = cv2.convexityDefects(approx, hull)
l = 0
for i in range(defects.shape[0]):
s, e, f, d = defects[i, 0]
start = tuple(approx[s][0])
end = tuple(approx[e][0])
far = tuple(approx[f][0])
pt = (100, 180)
a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
s = (a + b + c) / 2
ar = math.sqrt(s * (s - a) * (s - b) * (s - c))
d = (2 * ar) / a
angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 57
if angle <= 90 and d > 30:
l += 1
cv2.circle(roi, far, 3, [255, 0, 0], -1)
cv2.line(roi, start, end, [0, 255, 0], 2)
l += 1
font = cv2.FONT_HERSHEY_SIMPLEX
if l == 1:
if areacnt < 2000:
pass
else:
if arearatio < 12:
pass
cv2.putText(frame, 'moving hand', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
else:
su = 1
cv2.putText(frame, 'option 1', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 2:
su = 2
cv2.putText(frame, 'option 2', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 3:
su = 3
cv2.putText(frame, 'option 3', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 4:
su = 4
cv2.putText(frame, 'option 4', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 5:
su = 5
cv2.putText(frame, 'Deselect All', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
else:
pass
except:
cv2.line(frame, (350, 478), (350, 0), (0, 0, 255), 2)
cv2.line(frame, (550, 478), (550, 0), (0, 0, 255), 2)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
for (x1, y1, w1, h1) in faces:
cv2.rectangle(frame, (x1, y1), (x1 + w1, y1 + h1), (255, 0,0), 2)
flag=duke(x1, y1, w1, h1, frame)
      x, y, w, h = 100, 100, 200, 200  # ROI box, same as in the try block
      cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
k = cv2.waitKey(5) & 0xFF
return su,flag,frame
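# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# webcam at index 0 and Face.xml located next to this script.
#
#   cap = cv2.VideoCapture(0)
#   while True:
#       ret, frame = cap.read()
#       if not ret:
#           break
#       option, penalty, annotated = update(frame)
#       cv2.imshow('ZEST.IO', annotated)
#       if cv2.waitKey(5) & 0xFF == 27:   # Esc quits
#           break
#   cap.release()
#   cv2.destroyAllWindows()
# ---------------------------------------------------------------------------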
| 36.889764 | 100 | 0.476414 |
eb6b85809da9e3e5760d2ddbd47cce7e907ac91b | 30,227 | py | Python | auditlog_tests/tests.py | washdrop/django-auditlog | b0717a52d3883a03f0f0ddcc7b5329924a81c423 | ["MIT"] | 252 | 2020-09-23T13:32:49.000Z | 2022-03-29T18:38:59.000Z | auditlog_tests/tests.py | washdrop/django-auditlog | b0717a52d3883a03f0f0ddcc7b5329924a81c423 | ["MIT"] | 121 | 2020-09-23T12:56:39.000Z | 2022-03-31T06:59:09.000Z | auditlog_tests/tests.py | washdrop/django-auditlog | b0717a52d3883a03f0f0ddcc7b5329924a81c423 | ["MIT"] | 89 | 2020-09-25T07:22:52.000Z | 2022-03-29T07:59:35.000Z |
import datetime
import json
import django
from dateutil.tz import gettz
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ValidationError
from django.db.models.signals import pre_save
from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from django.utils import dateformat, formats, timezone
from auditlog.middleware import AuditlogMiddleware
from auditlog.models import LogEntry
from auditlog.registry import auditlog
from auditlog_tests.models import (
AdditionalDataIncludedModel,
AltPrimaryKeyModel,
CharfieldTextfieldModel,
ChoicesFieldModel,
DateTimeFieldModel,
ManyRelatedModel,
NoDeleteHistoryModel,
PostgresArrayFieldModel,
ProxyModel,
RelatedModel,
SimpleExcludeModel,
SimpleIncludeModel,
SimpleMappingModel,
SimpleModel,
UUIDPrimaryKeyModel,
)
class SimpleModelTest(TestCase):
def setUp(self):
self.obj = SimpleModel.objects.create(text="I am not difficult.")
def test_create(self):
"""Creation is logged correctly."""
# Get the object to work with
obj = self.obj
# Check for log entries
self.assertTrue(obj.history.count() == 1, msg="There is one log entry")
try:
history = obj.history.get()
except obj.history.DoesNotExist:
self.assertTrue(False, "Log entry exists")
else:
self.assertEqual(
history.action, LogEntry.Action.CREATE, msg="Action is 'CREATE'"
)
self.assertEqual(
history.object_repr, str(obj), msg="Representation is equal"
)
def test_update(self):
"""Updates are logged correctly."""
# Get the object to work with
obj = self.obj
# Change something
obj.boolean = True
obj.save()
# Check for log entries
self.assertTrue(
obj.history.filter(action=LogEntry.Action.UPDATE).count() == 1,
msg="There is one log entry for 'UPDATE'",
)
history = obj.history.get(action=LogEntry.Action.UPDATE)
self.assertJSONEqual(
history.changes,
'{"boolean": ["False", "True"]}',
msg="The change is correctly logged",
)
def test_delete(self):
"""Deletion is logged correctly."""
# Get the object to work with
obj = self.obj
history = obj.history.latest()
# Delete the object
obj.delete()
# Check for log entries
self.assertTrue(
LogEntry.objects.filter(
content_type=history.content_type,
object_pk=history.object_pk,
action=LogEntry.Action.DELETE,
).count()
== 1,
msg="There is one log entry for 'DELETE'",
)
def test_recreate(self):
SimpleModel.objects.all().delete()
self.setUp()
self.test_create()
class AltPrimaryKeyModelTest(SimpleModelTest):
def setUp(self):
self.obj = AltPrimaryKeyModel.objects.create(
key=str(datetime.datetime.now()), text="I am strange."
)
class UUIDPrimaryKeyModelTest(SimpleModelTest):
def setUp(self):
self.obj = UUIDPrimaryKeyModel.objects.create(text="I am strange.")
def test_get_for_object(self):
self.obj.boolean = True
self.obj.save()
self.assertEqual(LogEntry.objects.get_for_object(self.obj).count(), 2)
def test_get_for_objects(self):
self.obj.boolean = True
self.obj.save()
self.assertEqual(
LogEntry.objects.get_for_objects(UUIDPrimaryKeyModel.objects.all()).count(),
2,
)
class ProxyModelTest(SimpleModelTest):
def setUp(self):
self.obj = ProxyModel.objects.create(text="I am not what you think.")
class ManyRelatedModelTest(TestCase):
"""
Test the behaviour of a many-to-many relationship.
"""
def setUp(self):
self.obj = ManyRelatedModel.objects.create()
self.rel_obj = ManyRelatedModel.objects.create()
self.obj.related.add(self.rel_obj)
def test_related(self):
self.assertEqual(
LogEntry.objects.get_for_objects(self.obj.related.all()).count(),
self.rel_obj.history.count(),
)
self.assertEqual(
LogEntry.objects.get_for_objects(self.obj.related.all()).first(),
self.rel_obj.history.first(),
)
class MiddlewareTest(TestCase):
"""
Test the middleware responsible for connecting and disconnecting the signals used in automatic logging.
"""
def setUp(self):
self.middleware = AuditlogMiddleware()
self.factory = RequestFactory()
self.user = User.objects.create_user(
username="test", email="test@example.com", password="top_secret"
)
def test_request_anonymous(self):
"""No actor will be logged when a user is not logged in."""
# Create a request
request = self.factory.get("/")
request.user = AnonymousUser()
# Run middleware
self.middleware.process_request(request)
# Validate result
self.assertFalse(pre_save.has_listeners(LogEntry))
# Finalize transaction
self.middleware.process_exception(request, None)
def test_request(self):
"""The actor will be logged when a user is logged in."""
# Create a request
request = self.factory.get("/")
request.user = self.user
# Run middleware
self.middleware.process_request(request)
# Validate result
self.assertTrue(pre_save.has_listeners(LogEntry))
# Finalize transaction
self.middleware.process_exception(request, None)
def test_response(self):
"""The signal will be disconnected when the request is processed."""
# Create a request
request = self.factory.get("/")
request.user = self.user
# Run middleware
self.middleware.process_request(request)
self.assertTrue(
pre_save.has_listeners(LogEntry)
) # The signal should be present before trying to disconnect it.
self.middleware.process_response(request, HttpResponse())
# Validate result
self.assertFalse(pre_save.has_listeners(LogEntry))
def test_exception(self):
"""The signal will be disconnected when an exception is raised."""
# Create a request
request = self.factory.get("/")
request.user = self.user
# Run middleware
self.middleware.process_request(request)
self.assertTrue(
pre_save.has_listeners(LogEntry)
) # The signal should be present before trying to disconnect it.
self.middleware.process_exception(request, ValidationError("Test"))
# Validate result
self.assertFalse(pre_save.has_listeners(LogEntry))
class SimpleIncludeModelTest(TestCase):
"""Log only changes in include_fields"""
def test_register_include_fields(self):
sim = SimpleIncludeModel(label="Include model", text="Looong text")
sim.save()
self.assertTrue(sim.history.count() == 1, msg="There is one log entry")
# Change label, record
sim.label = "Changed label"
sim.save()
self.assertTrue(sim.history.count() == 2, msg="There are two log entries")
# Change text, ignore
sim.text = "Short text"
sim.save()
self.assertTrue(sim.history.count() == 2, msg="There are two log entries")
class SimpleExcludeModelTest(TestCase):
"""Log only changes that are not in exclude_fields"""
def test_register_exclude_fields(self):
sem = SimpleExcludeModel(label="Exclude model", text="Looong text")
sem.save()
self.assertTrue(sem.history.count() == 1, msg="There is one log entry")
        # Change label, record
sem.label = "Changed label"
sem.save()
self.assertTrue(sem.history.count() == 2, msg="There are two log entries")
        # Change text, ignore
sem.text = "Short text"
sem.save()
self.assertTrue(sem.history.count() == 2, msg="There are two log entries")
class SimpleMappingModelTest(TestCase):
"""Diff displays fields as mapped field names where available through mapping_fields"""
def test_register_mapping_fields(self):
smm = SimpleMappingModel(
sku="ASD301301A6", vtxt="2.1.5", not_mapped="Not mapped"
)
smm.save()
self.assertTrue(
smm.history.latest().changes_dict["sku"][1] == "ASD301301A6",
msg="The diff function retains 'sku' and can be retrieved.",
)
self.assertTrue(
smm.history.latest().changes_dict["not_mapped"][1] == "Not mapped",
msg="The diff function does not map 'not_mapped' and can be retrieved.",
)
self.assertTrue(
smm.history.latest().changes_display_dict["Product No."][1]
== "ASD301301A6",
msg="The diff function maps 'sku' as 'Product No.' and can be retrieved.",
)
self.assertTrue(
smm.history.latest().changes_display_dict["Version"][1] == "2.1.5",
msg=(
"The diff function maps 'vtxt' as 'Version' through verbose_name"
" setting on the model field and can be retrieved."
),
)
self.assertTrue(
smm.history.latest().changes_display_dict["not mapped"][1] == "Not mapped",
msg=(
"The diff function uses the django default verbose name for 'not_mapped'"
" and can be retrieved."
),
)
class AdditionalDataModelTest(TestCase):
"""Log additional data if get_additional_data is defined in the model"""
def test_model_without_additional_data(self):
obj_wo_additional_data = SimpleModel.objects.create(
text="No additional " "data"
)
obj_log_entry = obj_wo_additional_data.history.get()
self.assertIsNone(obj_log_entry.additional_data)
def test_model_with_additional_data(self):
related_model = SimpleModel.objects.create(text="Log my reference")
obj_with_additional_data = AdditionalDataIncludedModel(
label="Additional data to log entries", related=related_model
)
obj_with_additional_data.save()
self.assertTrue(
obj_with_additional_data.history.count() == 1, msg="There is 1 log entry"
)
log_entry = obj_with_additional_data.history.get()
# FIXME: Work-around for the fact that additional_data isn't working
# on Django 3.1 correctly (see https://github.com/jazzband/django-auditlog/issues/266)
if django.VERSION >= (3, 1):
extra_data = json.loads(log_entry.additional_data)
else:
extra_data = log_entry.additional_data
self.assertIsNotNone(extra_data)
self.assertTrue(
extra_data["related_model_text"] == related_model.text,
msg="Related model's text is logged",
)
self.assertTrue(
extra_data["related_model_id"] == related_model.id,
msg="Related model's id is logged",
)
class DateTimeFieldModelTest(TestCase):
"""Tests if DateTimeField changes are recognised correctly"""
utc_plus_one = timezone.get_fixed_timezone(datetime.timedelta(hours=1))
now = timezone.now()
def test_model_with_same_time(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to same datetime and timezone
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
dtm.timestamp = timestamp
dtm.date = datetime.date(2017, 1, 10)
dtm.time = datetime.time(12, 0)
dtm.save()
# Nothing should have changed
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
def test_model_with_different_timezone(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to same datetime in another timezone
timestamp = datetime.datetime(2017, 1, 10, 13, 0, tzinfo=self.utc_plus_one)
dtm.timestamp = timestamp
dtm.save()
# Nothing should have changed
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
def test_model_with_different_datetime(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to another datetime in the same timezone
timestamp = datetime.datetime(2017, 1, 10, 13, 0, tzinfo=timezone.utc)
dtm.timestamp = timestamp
dtm.save()
# The time should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_model_with_different_date(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
        # Change date to another date
        date = datetime.date(2017, 1, 11)
dtm.date = date
dtm.save()
        # The date should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_model_with_different_time(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
        # Change time to another time
time = datetime.time(6, 0)
dtm.time = time
dtm.save()
# The time should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_model_with_different_time_and_timezone(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to another datetime and another timezone
timestamp = datetime.datetime(2017, 1, 10, 14, 0, tzinfo=self.utc_plus_one)
dtm.timestamp = timestamp
dtm.save()
# The time should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_changes_display_dict_datetime(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
localized_timestamp = timestamp.astimezone(gettz(settings.TIME_ZONE))
self.assertTrue(
dtm.history.latest().changes_display_dict["timestamp"][1]
== dateformat.format(localized_timestamp, settings.DATETIME_FORMAT),
msg=(
"The datetime should be formatted according to Django's settings for"
" DATETIME_FORMAT"
),
)
timestamp = timezone.now()
dtm.timestamp = timestamp
dtm.save()
localized_timestamp = timestamp.astimezone(gettz(settings.TIME_ZONE))
self.assertTrue(
dtm.history.latest().changes_display_dict["timestamp"][1]
== dateformat.format(localized_timestamp, settings.DATETIME_FORMAT),
msg=(
"The datetime should be formatted according to Django's settings for"
" DATETIME_FORMAT"
),
)
# Change USE_L10N = True
with self.settings(USE_L10N=True, LANGUAGE_CODE="en-GB"):
self.assertTrue(
dtm.history.latest().changes_display_dict["timestamp"][1]
== formats.localize(localized_timestamp),
msg=(
"The datetime should be formatted according to Django's settings for"
" USE_L10N is True with a different LANGUAGE_CODE."
),
)
def test_changes_display_dict_date(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(
dtm.history.latest().changes_display_dict["date"][1]
== dateformat.format(date, settings.DATE_FORMAT),
msg=(
"The date should be formatted according to Django's settings for"
" DATE_FORMAT unless USE_L10N is True."
),
)
date = datetime.date(2017, 1, 11)
dtm.date = date
dtm.save()
self.assertTrue(
dtm.history.latest().changes_display_dict["date"][1]
== dateformat.format(date, settings.DATE_FORMAT),
msg=(
"The date should be formatted according to Django's settings for"
" DATE_FORMAT unless USE_L10N is True."
),
)
# Change USE_L10N = True
with self.settings(USE_L10N=True, LANGUAGE_CODE="en-GB"):
self.assertTrue(
dtm.history.latest().changes_display_dict["date"][1]
== formats.localize(date),
msg=(
"The date should be formatted according to Django's settings for"
" USE_L10N is True with a different LANGUAGE_CODE."
),
)
def test_changes_display_dict_time(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
self.assertTrue(
dtm.history.latest().changes_display_dict["time"][1]
== dateformat.format(time, settings.TIME_FORMAT),
msg=(
"The time should be formatted according to Django's settings for"
" TIME_FORMAT unless USE_L10N is True."
),
)
time = datetime.time(6, 0)
dtm.time = time
dtm.save()
self.assertTrue(
dtm.history.latest().changes_display_dict["time"][1]
== dateformat.format(time, settings.TIME_FORMAT),
msg=(
"The time should be formatted according to Django's settings for"
" TIME_FORMAT unless USE_L10N is True."
),
)
# Change USE_L10N = True
with self.settings(USE_L10N=True, LANGUAGE_CODE="en-GB"):
self.assertTrue(
dtm.history.latest().changes_display_dict["time"][1]
== formats.localize(time),
msg=(
"The time should be formatted according to Django's settings for"
" USE_L10N is True with a different LANGUAGE_CODE."
),
)
def test_update_naive_dt(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(
label="DateTimeField model",
timestamp=timestamp,
date=date,
time=time,
naive_dt=self.now,
)
dtm.save()
        # Changing the naive datetime field doesn't raise an error
dtm.naive_dt = timezone.make_naive(timezone.now(), timezone=timezone.utc)
dtm.save()
class UnregisterTest(TestCase):
def setUp(self):
auditlog.unregister(SimpleModel)
self.obj = SimpleModel.objects.create(text="No history")
def tearDown(self):
# Re-register for future tests
auditlog.register(SimpleModel)
def test_unregister_create(self):
"""Creation is not logged after unregistering."""
# Get the object to work with
obj = self.obj
# Check for log entries
self.assertTrue(obj.history.count() == 0, msg="There are no log entries")
def test_unregister_update(self):
"""Updates are not logged after unregistering."""
# Get the object to work with
obj = self.obj
# Change something
obj.boolean = True
obj.save()
# Check for log entries
self.assertTrue(obj.history.count() == 0, msg="There are no log entries")
def test_unregister_delete(self):
"""Deletion is not logged after unregistering."""
# Get the object to work with
obj = self.obj
# Delete the object
obj.delete()
# Check for log entries
self.assertTrue(LogEntry.objects.count() == 0, msg="There are no log entries")
class ChoicesFieldModelTest(TestCase):
def setUp(self):
self.obj = ChoicesFieldModel.objects.create(
status=ChoicesFieldModel.RED,
multiplechoice=[
ChoicesFieldModel.RED,
ChoicesFieldModel.YELLOW,
ChoicesFieldModel.GREEN,
],
)
def test_changes_display_dict_single_choice(self):
self.assertTrue(
self.obj.history.latest().changes_display_dict["status"][1] == "Red",
msg="The human readable text 'Red' is displayed.",
)
self.obj.status = ChoicesFieldModel.GREEN
self.obj.save()
self.assertTrue(
self.obj.history.latest().changes_display_dict["status"][1] == "Green",
msg="The human readable text 'Green' is displayed.",
)
def test_changes_display_dict_multiplechoice(self):
self.assertTrue(
self.obj.history.latest().changes_display_dict["multiplechoice"][1]
== "Red, Yellow, Green",
msg="The human readable text 'Red, Yellow, Green' is displayed.",
)
self.obj.multiplechoice = ChoicesFieldModel.RED
self.obj.save()
self.assertTrue(
self.obj.history.latest().changes_display_dict["multiplechoice"][1]
== "Red",
msg="The human readable text 'Red' is displayed.",
)
class CharfieldTextfieldModelTest(TestCase):
def setUp(self):
self.PLACEHOLDER_LONGCHAR = "s" * 255
self.PLACEHOLDER_LONGTEXTFIELD = "s" * 1000
self.obj = CharfieldTextfieldModel.objects.create(
longchar=self.PLACEHOLDER_LONGCHAR,
longtextfield=self.PLACEHOLDER_LONGTEXTFIELD,
)
def test_changes_display_dict_longchar(self):
self.assertTrue(
self.obj.history.latest().changes_display_dict["longchar"][1]
== "{}...".format(self.PLACEHOLDER_LONGCHAR[:140]),
msg="The string should be truncated at 140 characters with an ellipsis at the end.",
)
SHORTENED_PLACEHOLDER = self.PLACEHOLDER_LONGCHAR[:139]
self.obj.longchar = SHORTENED_PLACEHOLDER
self.obj.save()
self.assertTrue(
self.obj.history.latest().changes_display_dict["longchar"][1]
== SHORTENED_PLACEHOLDER,
msg="The field should display the entire string because it is less than 140 characters",
)
def test_changes_display_dict_longtextfield(self):
self.assertTrue(
self.obj.history.latest().changes_display_dict["longtextfield"][1]
== "{}...".format(self.PLACEHOLDER_LONGTEXTFIELD[:140]),
msg="The string should be truncated at 140 characters with an ellipsis at the end.",
)
SHORTENED_PLACEHOLDER = self.PLACEHOLDER_LONGTEXTFIELD[:139]
self.obj.longtextfield = SHORTENED_PLACEHOLDER
self.obj.save()
self.assertTrue(
self.obj.history.latest().changes_display_dict["longtextfield"][1]
== SHORTENED_PLACEHOLDER,
msg="The field should display the entire string because it is less than 140 characters",
)
class PostgresArrayFieldModelTest(TestCase):
databases = "__all__"
def setUp(self):
self.obj = PostgresArrayFieldModel.objects.create(
arrayfield=[PostgresArrayFieldModel.RED, PostgresArrayFieldModel.GREEN],
)
@property
def latest_array_change(self):
return self.obj.history.latest().changes_display_dict["arrayfield"][1]
def test_changes_display_dict_arrayfield(self):
self.assertTrue(
self.latest_array_change == "Red, Green",
msg="The human readable text for the two choices, 'Red, Green' is displayed.",
)
self.obj.arrayfield = [PostgresArrayFieldModel.GREEN]
self.obj.save()
self.assertTrue(
self.latest_array_change == "Green",
msg="The human readable text 'Green' is displayed.",
)
self.obj.arrayfield = []
self.obj.save()
self.assertTrue(
self.latest_array_change == "",
msg="The human readable text '' is displayed.",
)
self.obj.arrayfield = [PostgresArrayFieldModel.GREEN]
self.obj.save()
self.assertTrue(
self.latest_array_change == "Green",
msg="The human readable text 'Green' is displayed.",
)
class AdminPanelTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.username = "test_admin"
cls.password = User.objects.make_random_password()
cls.user, created = User.objects.get_or_create(username=cls.username)
cls.user.set_password(cls.password)
cls.user.is_staff = True
cls.user.is_superuser = True
cls.user.is_active = True
cls.user.save()
cls.obj = SimpleModel.objects.create(text="For admin logentry test")
def test_auditlog_admin(self):
self.client.login(username=self.username, password=self.password)
log_pk = self.obj.history.latest().pk
res = self.client.get("/admin/auditlog/logentry/")
assert res.status_code == 200
res = self.client.get("/admin/auditlog/logentry/add/")
assert res.status_code == 200
res = self.client.get(
"/admin/auditlog/logentry/{}/".format(log_pk), follow=True
)
assert res.status_code == 200
res = self.client.get("/admin/auditlog/logentry/{}/delete/".format(log_pk))
assert res.status_code == 200
res = self.client.get("/admin/auditlog/logentry/{}/history/".format(log_pk))
assert res.status_code == 200
class NoDeleteHistoryTest(TestCase):
def test_delete_related(self):
instance = SimpleModel.objects.create(integer=1)
assert LogEntry.objects.all().count() == 1
instance.integer = 2
instance.save()
assert LogEntry.objects.all().count() == 2
instance.delete()
entries = LogEntry.objects.order_by("id")
# The "DELETE" record is always retained
assert LogEntry.objects.all().count() == 1
assert entries.first().action == LogEntry.Action.DELETE
def test_no_delete_related(self):
instance = NoDeleteHistoryModel.objects.create(integer=1)
self.assertEqual(LogEntry.objects.all().count(), 1)
instance.integer = 2
instance.save()
self.assertEqual(LogEntry.objects.all().count(), 2)
instance.delete()
entries = LogEntry.objects.order_by("id")
self.assertEqual(entries.count(), 3)
self.assertEqual(
list(entries.values_list("action", flat=True)),
[LogEntry.Action.CREATE, LogEntry.Action.UPDATE, LogEntry.Action.DELETE],
)
| 35.353216 | 107 | 0.609753 |
c528d34d52df2a1306e2c6fddf8e3bd1f2c54397 | 9,940 | py | Python | akaocr/models/modules/utils.py | qai-research/Efficient_Text_Detection | e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b | ["MIT"] | 2 | 2021-04-28T04:13:09.000Z | 2021-06-05T04:11:11.000Z | akaocr/models/modules/utils.py | qai-research/Efficient_Text_Detection | e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b | ["MIT"] | 2 | 2021-05-06T13:49:52.000Z | 2021-05-14T08:45:13.000Z | akaocr/models/modules/utils.py | qai-research/Efficient_Text_Detection | e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
_____________________________________________________________________________
Created By : Nguyen Ngoc Nghia - Nghiann3
Created Date: Fri March 12 13:00:00 VNT 2021
Project : AkaOCR core
_____________________________________________________________________________
This file contains helper functions for building the model and for loading model parameters
_____________________________________________________________________________
"""
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum', 'batch_norm_epsilon',
'num_classes', 'width_coefficient', 'depth_coefficient',
'depth_divisor', 'min_depth', 'drop_connect_rate'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
# Change namedtuple defaults
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_variables[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
def round_filters(filters, global_params):
""" Calculate and round number of filters based on depth factor. """
factor = global_params.width_coefficient
if not factor:
return filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= factor
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, global_params):
""" Round number of filters based on depth factor. """
factor = global_params.depth_coefficient
if not factor:
return repeats
return int(math.ceil(factor * repeats))
def drop_connect(inputs, p, training):
""" Drop connect. """
if not training: return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
class Conv2dStaticSamePadding(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, groups=1, dilation=1, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
bias=bias, groups=groups)
self.stride = self.conv.stride
self.kernel_size = self.conv.kernel_size
self.dilation = self.conv.dilation
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
def forward(self, x):
h, w = x.shape[-2:]
extra_h = (math.ceil(w / self.stride[1]) - 1) * self.stride[1] - w + self.kernel_size[1]
extra_v = (math.ceil(h / self.stride[0]) - 1) * self.stride[0] - h + self.kernel_size[0]
left = extra_h // 2
right = extra_h - left
top = extra_v // 2
bottom = extra_v - top
x = F.pad(x, [left, right, top, bottom])
x = self.conv(x)
return x
class MaxPool2dStaticSamePadding(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.pool = nn.MaxPool2d(*args, **kwargs)
self.stride = self.pool.stride
self.kernel_size = self.pool.kernel_size
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
def forward(self, x):
h, w = x.shape[-2:]
extra_h = (math.ceil(w / self.stride[1]) - 1) * self.stride[1] - w + self.kernel_size[1]
extra_v = (math.ceil(h / self.stride[0]) - 1) * self.stride[0] - h + self.kernel_size[0]
left = extra_h // 2
right = extra_h - left
top = extra_v // 2
bottom = extra_v - top
x = F.pad(x, [left, right, top, bottom])
x = self.pool(x)
return x
def efficientnet_params(model_name):
""" Map EfficientNet model name to parameter coefficients. """
params_dict = {
# Coefficients: width,depth,res,dropout
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
}
return params_dict[model_name]
class BlockDecoder(object):
@staticmethod
def _decode_block_string(block_string):
""" Gets a block through a string notation of arguments. """
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
assert (('s' in options and len(options['s']) == 1) or
(len(options['s']) == 2 and options['s'][0] == options['s'][1]))
return BlockArgs(
kernel_size=int(options['k']),
num_repeat=int(options['r']),
input_filters=int(options['i']),
output_filters=int(options['o']),
expand_ratio=int(options['e']),
id_skip=('noskip' not in block_string),
se_ratio=float(options['se']) if 'se' in options else None,
stride=[int(options['s'][0])])
@staticmethod
def _encode_block_string(block):
"""Encodes a block to a string."""
args = [
'r%d' % block.num_repeat,
'k%d' % block.kernel_size,
's%d%d' % (block.strides[0], block.strides[1]),
'e%s' % block.expand_ratio,
'i%d' % block.input_filters,
'o%d' % block.output_filters
]
if 0 < block.se_ratio <= 1:
args.append('se%s' % block.se_ratio)
if block.id_skip is False:
args.append('noskip')
return '_'.join(args)
@staticmethod
def decode(string_list):
"""
Decodes a list of string notations to specify blocks inside the network.
:param string_list: a list of strings, each string is a notation of block
:return: a list of BlockArgs namedtuples of block args
"""
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(BlockDecoder._decode_block_string(block_string))
return blocks_args
@staticmethod
def encode(blocks_args):
"""
Encodes a list of BlockArgs to a list of strings.
:param blocks_args: a list of BlockArgs namedtuples of block args
:return: a list of strings, each string is a notation of block
"""
block_strings = []
for block in blocks_args:
block_strings.append(BlockDecoder._encode_block_string(block))
return block_strings
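# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module):
#
#   args = BlockDecoder.decode(['r1_k3_s11_e1_i32_o16_se0.25'])
#   # -> [BlockArgs(kernel_size=3, num_repeat=1, input_filters=32,
#   #               output_filters=16, expand_ratio=1, id_skip=True,
#   #               stride=[1], se_ratio=0.25)]
# ---------------------------------------------------------------------------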
def efficientnet(width_coefficient=None, depth_coefficient=None,
drop_connect_rate=0.2, image_size=None, num_classes=1000):
""" Creates a efficientnet model. """
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
'r1_k3_s11_e6_i192_o320_se0.25',
]
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
drop_connect_rate=drop_connect_rate,
num_classes=num_classes,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None,
)
return blocks_args, global_params
def get_model_params(compound_coef):
""" Get the block args and global params for a given model """
model_name = "efficientnet-b"+str(compound_coef)
w, d,_,_ = efficientnet_params(model_name)
blocks_args, global_params = efficientnet(width_coefficient=w, depth_coefficient=d)
    return blocks_args, global_params
| 36.014493 | 116 | 0.630483 |
62bd0d5bebb416c6215d47512f13c7c7bad5ce62 | 1,689 | py | Python | sarafan/distance.py | sarafanio/sarafan | 1c15bcdb61c13558ca1d833547222530dd363333 | [
"MIT"
] | 2 | 2021-02-07T17:08:32.000Z | 2022-01-21T06:39:50.000Z | sarafan/distance.py | sarafanio/sarafan | 1c15bcdb61c13558ca1d833547222530dd363333 | [
"MIT"
] | 2 | 2021-02-07T15:22:57.000Z | 2021-02-07T15:24:49.000Z | sarafan/distance.py | sarafanio/sarafan | 1c15bcdb61c13558ca1d833547222530dd363333 | [
"MIT"
] | null | null | null | """Hash distance helpers.
Helps to calculate distance between two integers. Also provides helpers to convert
ascii strings and hex strings to integer.
Used to calculate distance between peers and magnets.
"""
import math
from functools import lru_cache
from Cryptodome.Hash import keccak
def ascii_to_hash(value) -> str:
"""Convert ascii string (service_id in terms of sarafan) to measurable hash.
"""
data = bytes(value, 'ascii')
return keccak.new(data=data, digest_bytes=32).hexdigest()
def hex_to_position(value: str) -> int:
"""Convert hex string to int position.
"""
return int(value, 16)
@lru_cache(50)
def ascii_to_position(value: str) -> int:
"""Convert ascii string to int position.
keccak256 hash will be used to normalize strings.
"""
return hex_to_position(ascii_to_hash(value))
def peer_hash(peer) -> int:
return ascii_to_position(peer.service_id)
def distance(x: int, y: int) -> float:
"""Calculate distance between two points.
"""
return abs(math.sin(x ^ y))
def hash_distance(hash1: str, hash2: str) -> float:
"""Distance between two hex-encoded hashes (result of `hexdigest()`).
Hash lengths should be equal.
"""
assert len(hash1) == len(hash2), "Hash length should be equal"
return distance(hex_to_position(hash1), hex_to_position(hash2))
def ascii_distance(s1: str, s2: str) -> float:
"""Distance between two ascii strings.
keccak256 hash will be used to normalize them.
"""
return distance(ascii_to_position(s1), ascii_to_position(s2))
def ascii_to_hash_distance(s: str, h: str) -> float:
return distance(ascii_to_position(s), hex_to_position(h))
| 25.984615 | 82 | 0.705743 |
9c4666ac510f0906d063d47804f318fad708d487 | 4,431 | py | Python | paropy/plot_utils.py | jnywong/paropy | 74232e295962ee80e49ccf3d090fbad8697808c3 | [
"MIT"
] | null | null | null | paropy/plot_utils.py | jnywong/paropy | 74232e295962ee80e49ccf3d090fbad8697808c3 | [
"MIT"
] | null | null | null | paropy/plot_utils.py | jnywong/paropy | 74232e295962ee80e49ccf3d090fbad8697808c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 8 16:10:19 2021
@author: wongj
"""
import numpy as np
import scipy.special as sp
import math
from scipy.integrate import cumtrapz
from matplotlib import ticker
from paropy.coreproperties import icb_radius, cmb_radius
def y_axis_sci(ax):
'''
Use scientific notation on the y-axis
'''
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1, 1))
ax.yaxis.set_major_formatter(formatter)
return ax
def rad_to_deg(phi,theta):
'''Converts radians into longitudinal and latitudinal degrees where -180 < phi_deg < 180 and -90 < theta_deg < 90 degrees'''
lon=np.zeros(len(phi))
lat=np.zeros(len(theta))
i=0
for val in phi:
lon[i]=math.degrees(val)-180
i=i+1
i=0
for val in theta:
lat[i]=math.degrees(val)-90
i=i+1
return (lon, lat)
def deg_to_rad(lon, lat):
'''Converts longitudinal and latitudinal degrees where -180 < phi_deg < 180 and -90 < theta_deg < 90 degrees into radians'''
phi = np.zeros(len(lon))
theta = np.zeros(len(lat))
i = 0
for val in lon:
phi[i] = math.radians(val)+np.pi
i = i+1
i = 0
for val in lat:
theta[i] = math.radians(val)+np.pi/2
i = i+1
return (phi, theta)
def get_Z_lim(Z,dp=1):
'''Choose Z limit for plot to dp decimal places'''
Z_lim = np.max(np.abs(Z))
Z_lim = np.round(Z_lim, dp)
return Z_lim
def streamfunction(radius,theta,ur,ut):
    '''Streamfunction for meridional cuts:
- radius and theta are 1d arrays
- ur and ut are 2d arrays of size len(radius)*len(theta)
'''
r,t = np.meshgrid(radius, theta)
# integrate for streamfunction (polars)
intr = cumtrapz(ut,r,axis=1,initial=0)
intt = cumtrapz(r*ur,t,axis=0,initial=0)[:,0][:,None]
psi = -intr + intt # + C, could add constant of integration here
return (psi)
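# Added usage sketch (not in the original module): shows the array shapes
# streamfunction() expects. The grid sizes and the toy velocity field
# (u_r = 0, u_theta = r) are arbitrary choices for illustration.
def _streamfunction_demo():
    radius = np.linspace(0.5, 1.5, 50)
    theta = np.linspace(0.0, np.pi, 60)
    ur = np.zeros((theta.size, radius.size))  # no radial flow
    ut = np.tile(radius, (theta.size, 1))     # u_theta = r on every colatitude row
    return streamfunction(radius, theta, ur, ut)  # psi has shape (60, 50)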
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
def C_shift(radius, rf, Z0, n_levels, lev_min=0, lev_max=1):
'''Normalise and shift the codensity field scale so that a < C < b -> 0 < (C - b)/h + 1 < 1'''
idx = np.argwhere(radius > rf)[0][0]
if rf !=0:
# b: max T near top of F-layer
Ts_max = np.mean(Z0[:, idx+1])
# a: min T outside F-layer
Ts_min = np.min(Z0[:, idx:])
else:
Ts_max = np.max(Z0)
Ts_min = np.min(Z0)
h = Ts_max-Ts_min
Z1 = (Z0 - Ts_max)/h # normalise
Z = Z1 + lev_max
levels = np.linspace(lev_min, lev_max, n_levels)
return Z, levels
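# Worked illustration of C_shift (added; the numbers are made up): if the codensity
# near the top of the F-layer gives b = Ts_max = 1.4 and the minimum outside the
# layer is a = Ts_min = 0.2, then h = 1.2 and a value C = 0.8 maps to
# (0.8 - 1.4)/1.2 + 1 = 0.5, so the shifted field lies between lev_min and lev_max.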
def semicircle(center_x, center_y, radius, stepsize=0.1):
"""
generates coordinates for a semicircle, centered at center_x, center_y
"""
x = np.arange(center_x, center_x+radius+stepsize, stepsize)
y = np.sqrt(abs(radius**2 - x**2))
# since each x value has two corresponding y-values, duplicate x-axis.
# [::-1] is required to have the correct order of elements for plt.plot.
x = np.concatenate([x,x[::-1]])
# concatenate y and flipped y.
y = np.concatenate([y,-y[::-1]])
return x, y + center_y
def merid_outline(ax,radius,linewidth=0.5):
x,y = semicircle(0,0,radius[0], 1e-4)
ax.plot(x, y, 'k', lw=linewidth)
x,y = semicircle(0,0,radius[-1], 1e-4)
ax.plot(x, y, 'k', lw=linewidth)
ax.vlines(0,radius[0],radius[-1],'k', lw=linewidth)
ax.vlines(0,-radius[0],-radius[-1],'k', lw=linewidth)
def flayer_outline(ax, rf,linewidth=0.5):
x, y = semicircle(0, 0, rf, 1e-4)
ax.plot(x, y, '--', lw = linewidth, color='darkgray')
def tangent_cylinder_latitude(rf):
shell_gap = cmb_radius - icb_radius
if rf == 0:
ri = icb_radius/cmb_radius
tc_lat = 90 - (np.pi/2-math.acos(ri))*180/np.pi
else:
tc_lat = 90 - (np.pi/2-math.acos(rf*shell_gap/cmb_radius))*180/np.pi
return tc_lat
def polar_minimum_latitude(theta,Br):
'''
Maximum (minimum) Br in each hemisphere
'''
idx_north = np.where(Br == np.max(Br[theta < np.pi/2]))[0][0]
idx_south = np.where(Br == np.min(Br[theta > np.pi/2]))[0][0]
# Convert to latitude
pm_lat_north = 90 - theta[idx_north]*180/np.pi
pm_lat_south = 90 - theta[idx_south]*180/np.pi
return pm_lat_north, pm_lat_south
| 29.738255 | 128 | 0.618371 |
399570dd7dfb89302f9e100a3319bf917148d8f6 | 263 | py | Python | categoria/views.py | Emerson-MM-Filho/finance-control | 0d6b20501f9f601ab373b67f9c9bff9c42839f66 | [
"MIT"
] | null | null | null | categoria/views.py | Emerson-MM-Filho/finance-control | 0d6b20501f9f601ab373b67f9c9bff9c42839f66 | [
"MIT"
] | null | null | null | categoria/views.py | Emerson-MM-Filho/finance-control | 0d6b20501f9f601ab373b67f9c9bff9c42839f66 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from .models import Categoria
from .serializers import CategoriaSerializer
class CategoriasView(viewsets.ModelViewSet):
queryset = Categoria.objects.all().order_by('-created_at')
serializer_class = CategoriaSerializer | 29.222222 | 62 | 0.821293 |
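# Hedged wiring sketch (added; not taken from the repository): a ModelViewSet is
# usually exposed through a DRF router in the project's urls.py. The route prefix
# below is an assumption.
#
# from rest_framework import routers
# from categoria.views import CategoriasView
#
# router = routers.DefaultRouter()
# router.register(r'categorias', CategoriasView)
# urlpatterns = router.urls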
33826b8435946389581cd9fd4e3aaae5effe3c4c | 2,031 | py | Python | pyexlatex/models/title/frame.py | whoopnip/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | 4 | 2020-06-08T07:17:12.000Z | 2021-11-04T21:39:52.000Z | pyexlatex/models/title/frame.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | 24 | 2020-02-17T17:20:44.000Z | 2021-12-20T00:10:19.000Z | pyexlatex/models/title/frame.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | null | null | null | from typing import Optional, Sequence, Union
from pyexlatex.presentation.beamer.frame.frame import Frame
from pyexlatex.models.title.title import Title
from pyexlatex.models.title.subtitle import Subtitle
from pyexlatex.models.credits.author import Author
from pyexlatex.models.date import Date
from pyexlatex.models.title.framepage import MakeFrameTitle
class TitleFrame(Frame):
def __init__(self, title: Optional[str] = None, authors: Optional[Union[str, Sequence[str]]] = None, date: Optional[str] = None,
short_title: Optional[str] = None, subtitle: Optional[str] = None, short_author: Optional[str] = None,
institutions: Optional[Sequence[Sequence[str]]] = None, short_institution: Optional[str] = None,
**kwargs):
pre_env_contents = [
Title(title, short_title=short_title) if title is not None else None,
Subtitle(subtitle) if subtitle is not None else None,
Author(
authors,
institutions=institutions,
short_institution=short_institution,
short_author=short_author
) if authors is not None else None,
Date(date) if date is not None else Date()
]
self.pre_env_content = [content for content in pre_env_contents if content is not None]
self.add_data_from_content(self.pre_env_content)
from pyexlatex.logic.builder import _build
pre_env_contents = _build(self.pre_env_content)
super().__init__(MakeFrameTitle(), label='title-frame', pre_env_contents=pre_env_contents, **kwargs)
def should_create_title_frame(title: str = None, authors: Optional[Union[str, Sequence[str]]] = None, date: str = None,
subtitle: Optional[str] = None, institutions: Optional[Sequence[Sequence[str]]] = None):
return any([
title is not None,
authors is not None,
date is not None,
subtitle is not None,
institutions is not None
]) | 46.159091 | 132 | 0.669621 |
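# Illustrative construction (added; every argument value is invented): TitleFrame
# assembles the Title/Subtitle/Author/Date pre-environment content and a beamer
# frame labelled 'title-frame'.
#
# frame = TitleFrame(
#     title='My Talk',
#     authors=['A. Author', 'B. Author'],
#     institutions=[['University One'], ['University Two']],
#     date='1 January 2020',
# )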
8bdc761f410e1343043eab9109e219a401df16e8 | 3,591 | py | Python | data_processing/g_pareto_plot.py | WybeKoper/PASAF | b7052eecb686f50a1988bdb7b1a88a26fc2240b5 | [
"Apache-2.0"
] | null | null | null | data_processing/g_pareto_plot.py | WybeKoper/PASAF | b7052eecb686f50a1988bdb7b1a88a26fc2240b5 | [
"Apache-2.0"
] | null | null | null | data_processing/g_pareto_plot.py | WybeKoper/PASAF | b7052eecb686f50a1988bdb7b1a88a26fc2240b5 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox
import pandas as pd
from adjustText import adjust_text
import os
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def pareto_plot(query, zoomed, latency_limit, zoomed_latency_limit):
load_pattern = "cosine"
path = "../experiment_data_processed/full_data/" + load_pattern + "/" + query
files = os.listdir(path)
fig, ax = plt.subplots()
color_per_autoscaler ={"HPA": "red", "vargav1": "purple","vargav2":"orange", "dhalion": "green", "ds2":"black", "ds2-adapted-reactive": "pink", "ds2-original-reactive":"brown", "ds2-adapted-non-reactive":"blue", "ds2-original-non-reactive":"blue"}
marker_per_autoscaler ={"HPA": "o", "vargav1": "*","vargav2":"P", "dhalion": "X", "ds2":"P", "ds2-adapted-reactive": "D", "ds2-original-reactive":"s", "ds2-adapted-non-reactive":"blue", "ds2-original-non-reactive":"blue"}
rename = {"HPA":"HPA", "vargav1":"Vargav1", "vargav2":"Vargav2", "dhalion":"Dhalion-adapted", "ds2-original-reactive": "DS2-modern", "ds2-adapted-reactive": "DS2-modern-adapted"}
latency_per_autoscaler = []
taskmanagers_per_autoscaler = []
texts = []
seen = set()
for file in files:
file_info = file.split("_")
query = file_info[0]
auto_scaler = file_info[1]
if "non" in auto_scaler:
continue
metric = file_info[2].replace(".csv", "")
if "i" in metric:
continue
df = pd.read_csv("../experiment_data_processed/full_data/" + load_pattern + "/" + query + "/" + file)
latency_list = df['latency'].tolist()
taskmanager_list = df['taskmanager'].tolist()
average_latency = sum(latency_list) / len(latency_list)
average_taskmanager = sum(taskmanager_list) / len(taskmanager_list)
if average_latency > latency_limit:
continue
if zoomed and average_latency > zoomed_latency_limit:
continue
latency_per_autoscaler.append(average_latency)
taskmanagers_per_autoscaler.append(average_taskmanager)
ax.scatter(average_taskmanager,average_latency, s=50, color=color_per_autoscaler[auto_scaler], marker=marker_per_autoscaler[auto_scaler], label=rename[auto_scaler] if auto_scaler not in seen else "")
# ax.annotate(metric, (average_taskmanager, average_latency), ha='center', size=6)
seen.add(auto_scaler)
texts.append(ax.text(average_taskmanager, average_latency, metric, ha='right', va='top', size=10))
if zoomed:
plt.ylim([0,zoomed_latency_limit])
plt.xlim([0,16])
else:
plt.ylim([0,latency_limit])
plt.xlim([0,16])
adjust_text(texts, only_move={'points':'y', 'texts':'y'}, arrowprops=dict(arrowstyle="->", color='r', lw=0))
plt.legend(loc=(1.02,0.5), labelspacing=1)
plt.grid()
plt.xlabel("Average number of taskmanagers")
plt.ylabel("Average latency (s)")
if zoomed:
path = "../figures_final/" + load_pattern + "/" + query + "/pareto_figs/" + query + "_pareto_zoomed.png"
else:
path = "../figures_final/" + load_pattern + "/" + query + "/pareto_figs/" + query + "_pareto.png"
plt.savefig(path, format="png", bbox_inches=Bbox([[0, 0], [8.0, 5.0]]), dpi=600)
# pareto_plot("query-1", False, 50, 20)
#
# pareto_plot("query-1", True, 50, 20)
# pareto_plot("query-3", False, 200, 50)
#
# pareto_plot("query-3", True, 200, 50)
pareto_plot("query-11", False, 100, 20)
pareto_plot("query-11", True, 100, 20)
| 44.8875 | 251 | 0.66054 |
76776f9496cfbf80d3263d9d2f7c39217602f38e | 666 | py | Python | skbio/io/_warning.py | JWDebelius/scikit-bio | 9df3edb46eb728f6efbd4f2db74529200ad40a77 | [
"BSD-3-Clause"
] | null | null | null | skbio/io/_warning.py | JWDebelius/scikit-bio | 9df3edb46eb728f6efbd4f2db74529200ad40a77 | [
"BSD-3-Clause"
] | null | null | null | skbio/io/_warning.py | JWDebelius/scikit-bio | 9df3edb46eb728f6efbd4f2db74529200ad40a77 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
class UnprovenFormatWarning(Warning):
"""Warn when the sniffer of a format cannot confirm expected value."""
pass
class ArgumentOverrideWarning(Warning):
"""Warn when a user provided kwarg differs from a guessed kwarg."""
pass
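# Minimal usage sketch (added; the message text is illustrative only): readers
# emit these through the standard warnings machinery, e.g.
#
# import warnings
# warnings.warn("Provided kwarg 'variant' overrides the sniffed value.",
#               ArgumentOverrideWarning)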
| 33.3 | 78 | 0.575075 |
ed9a4738c118253f0d8891ac045d4a2d659e26b6 | 970 | py | Python | Lib/lib-stdwin/VUMeter.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | 1 | 2019-10-25T21:41:07.000Z | 2019-10-25T21:41:07.000Z | Lib/lib-stdwin/VUMeter.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Lib/lib-stdwin/VUMeter.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | # Module 'VUMeter'
import audio
from StripChart import StripChart
K = 1024
Rates = [0, 32*K, 16*K, 8*K]
class VUMeter(StripChart):
#
# Override define() and timer() methods
#
def define(self, parent):
self = StripChart.define(self, (parent, 128))
self.parent.need_timer(self)
self.sampling = 0
self.rate = 3
self.enable(0)
return self
#
def timer(self):
if self.sampling:
chunk = audio.wait_recording()
self.sampling = 0
nums = audio.chr2num(chunk)
ampl = max(abs(min(nums)), abs(max(nums)))
self.append(ampl)
if self.enabled and not self.sampling:
audio.setrate(self.rate)
size = Rates[self.rate]/10
size = size/48*48
audio.start_recording(size)
self.sampling = 1
if self.sampling:
self.parent.settimer(1)
#
# New methods: start() and stop()
#
def stop(self):
if self.sampling:
chunk = audio.stop_recording()
self.sampling = 0
self.enable(0)
#
def start(self):
self.enable(1)
self.timer()
| 20.208333 | 47 | 0.66701 |
c7ac144573b9ab0c170797e0e3b87dbe0846fa74 | 1,412 | py | Python | utilities/video_frame.py | syniosis76/fmb | 09e38fc6959f77ab60ba90ea075146e31568f60e | [
"MIT"
] | null | null | null | utilities/video_frame.py | syniosis76/fmb | 09e38fc6959f77ab60ba90ea075146e31568f60e | [
"MIT"
] | null | null | null | utilities/video_frame.py | syniosis76/fmb | 09e38fc6959f77ab60ba90ea075146e31568f60e | [
"MIT"
] | null | null | null | from PIL import Image
from ffpyplayer.player import MediaPlayer
from ffpyplayer.pic import SWScale
import os
import sys
import time
import traceback
def get_frame_image(frame):
frame_size = frame.get_size()
width = frame_size[0]
height = frame_size[1]
frame_converter = SWScale(width, height, frame.get_pixel_format(), ofmt='rgb24')
new_frame = frame_converter.scale(frame)
image_data = bytes(new_frame.to_bytearray()[0])
return Image.frombuffer(mode='RGB', size=(width, height), data=image_data, decoder_name='raw')
def get_video_frame(path, position):
options = {'paused': True, 'vf': ['select=gte(t\,' + str(position) + ')'], 'an': True, 'fast': True}
player = MediaPlayer(path, ff_opts=options)
count = 0
    while player.get_metadata()['duration'] is None:
time.sleep(0.01)
count += 1
if count > 200:
raise TypeError('Invalid Video: ' + path)
metadata = player.get_metadata()
duration = metadata['duration']
if duration >= position + 0.1: # Tolerance
player.set_size(500,-1)
frame = None
while not frame:
frame = player.get_frame(force_refresh=True)[0]
if not frame:
time.sleep(0.01)
player.close_player()
if frame:
frame = frame[0]
image = get_frame_image(frame)
return image, duration
return None, duration | 29.416667 | 102 | 0.642351 |
83c6f0f7f9776bf4572efd2f618f0aff18a47f85 | 1,852 | py | Python | src/secml/ml/stats/c_distribution_gaussian.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 63 | 2020-04-20T16:31:16.000Z | 2022-03-29T01:05:35.000Z | src/secml/ml/stats/c_distribution_gaussian.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 5 | 2020-04-21T11:31:39.000Z | 2022-03-24T13:42:56.000Z | src/secml/ml/stats/c_distribution_gaussian.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 8 | 2020-04-21T09:16:42.000Z | 2022-02-23T16:28:43.000Z | """
.. module:: GaussianDistribution
   :synopsis: Multivariate normal (Gaussian) distribution
.. moduleauthor:: Ambra Demontis <ambra.demontis@unica.it>
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
from scipy.stats import multivariate_normal
from secml.array import CArray
from secml.core import CCreator
class CDistributionGaussian(CCreator):
"""A multivariate normal random variable.
Parameters
----------
mean : scalar, optional
Mean of the distribution (default zero)
cov : array_like or scalar, optional
Covariance matrix of the distribution (default one)
"""
def __init__(self, mean=0, cov=1):
self.mean = mean
self.cov = cov
def pdf(self, data):
"""Probability density function.
Parameters
----------
data : CArray
Quantiles, with the last axis of x denoting the components.
Returns
-------
pdf: CArray
Probability density function computed at input data.
"""
cov = self.cov
if isinstance(cov, CArray):
cov = cov.tondarray()
return CArray(multivariate_normal.pdf(data.tondarray(),
self.mean, cov))
def logpdf(self, data):
"""Log of the probability density function.
Parameters
----------
data : CArray
Quantiles, with the last axis of x denoting the components.
Returns
-------
pdf: CArray
Probability density function computed at input data.
"""
cov = self.cov
if isinstance(cov, CArray):
cov = cov.tondarray()
return CArray(multivariate_normal.logpdf(data.tondarray(),
self.mean, cov))
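# Usage sketch (added for illustration; the sample points are arbitrary):
#
# dist = CDistributionGaussian(mean=[0, 0], cov=[[1, 0], [0, 1]])
# p = dist.pdf(CArray([[0.0, 0.0], [1.0, 1.0]]))        # densities at two points
# logp = dist.logpdf(CArray([[0.0, 0.0], [1.0, 1.0]]))  # log-densities at the same points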
| 26.457143 | 74 | 0.575594 |
40860853ba793ad4eaca7c4de44bfa697de1fa2c | 279 | py | Python | tests/test_skeleton.py | jojo-31/smarter | f26712c349768606dbd7f23342e22c3f1a954c34 | [
"MIT"
] | null | null | null | tests/test_skeleton.py | jojo-31/smarter | f26712c349768606dbd7f23342e22c3f1a954c34 | [
"MIT"
] | null | null | null | tests/test_skeleton.py | jojo-31/smarter | f26712c349768606dbd7f23342e22c3f1a954c34 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from smarter.skeleton import fib
__author__ = "mjo"
__copyright__ = "mjo"
__license__ = "mit"
def test_fib():
assert fib(1) == 1
assert fib(2) == 1
assert fib(7) == 13
with pytest.raises(AssertionError):
fib(-10)
| 16.411765 | 39 | 0.620072 |
54014a50966a1b94a6e9d3a7d98769b8dba775a7 | 3,204 | py | Python | chinilla/cmds/netspace_funcs.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | [
"Apache-2.0"
] | null | null | null | chinilla/cmds/netspace_funcs.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | [
"Apache-2.0"
] | null | null | null | chinilla/cmds/netspace_funcs.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
import aiohttp
from chinilla.rpc.full_node_rpc_client import FullNodeRpcClient
from chinilla.util.byte_types import hexstr_to_bytes
from chinilla.util.config import load_config
from chinilla.util.default_root import DEFAULT_ROOT_PATH
from chinilla.util.ints import uint16
from chinilla.util.misc import format_bytes
async def netstorge_async(rpc_port: Optional[int], delta_block_height: str, start: str) -> None:
"""
Calculates the estimated space on the network given two block header hashes.
"""
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
if delta_block_height:
if start == "":
blockchain_state = await client.get_blockchain_state()
if blockchain_state["peak"] is None:
print("No blocks in blockchain")
client.close()
await client.await_closed()
return None
newer_block_height = blockchain_state["peak"].height
else:
newer_block = await client.get_block_record(hexstr_to_bytes(start))
if newer_block is None:
print("Block header hash", start, "not found.")
client.close()
await client.await_closed()
return None
else:
print("newer_height", newer_block.height)
newer_block_height = newer_block.height
newer_block_header = await client.get_block_record_by_height(newer_block_height)
older_block_height = max(0, newer_block_height - int(delta_block_height))
older_block_header = await client.get_block_record_by_height(older_block_height)
network_space_bytes_estimate = await client.get_network_space(
newer_block_header.header_hash, older_block_header.header_hash
)
print(
"Older Block\n"
f"Block Height: {older_block_header.height}\n"
f"Weight: {older_block_header.weight}\n"
f"VDF Iterations: {older_block_header.total_iters}\n"
f"Header Hash: 0x{older_block_header.header_hash}\n"
)
print(
"Newer Block\n"
f"Block Height: {newer_block_header.height}\n"
f"Weight: {newer_block_header.weight}\n"
f"VDF Iterations: {newer_block_header.total_iters}\n"
f"Header Hash: 0x{newer_block_header.header_hash}\n"
)
print(format_bytes(network_space_bytes_estimate))
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node rpc is running at {rpc_port}")
else:
print(f"Exception {e}")
client.close()
await client.await_closed()
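# Hedged usage sketch (added; not part of the original module): this coroutine is
# presumably driven by the `chinilla netspace` CLI, but it can also be run directly,
# e.g. to estimate space over the 1000 blocks below the current peak:
#
# import asyncio
# asyncio.run(netstorge_async(rpc_port=None, delta_block_height="1000", start=""))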
| 42.72 | 107 | 0.617041 |
1f9ef334f267b5cae68ea3c5f07f0d599c159a63 | 485 | py | Python | tests/test_movie.py | beckynayere/Movie_List | 4301e3b4e5d28a7cd0e7c09104f4d4131317f7b4 | [
"MIT"
] | 1 | 2020-10-19T06:22:09.000Z | 2020-10-19T06:22:09.000Z | tests/test_movie.py | beckynayere/Movie_List | 4301e3b4e5d28a7cd0e7c09104f4d4131317f7b4 | [
"MIT"
] | null | null | null | tests/test_movie.py | beckynayere/Movie_List | 4301e3b4e5d28a7cd0e7c09104f4d4131317f7b4 | [
"MIT"
] | null | null | null | import unittest
from app.models import Movie
from app import db
class MovieTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Movie class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_movie = Movie(1234, 'Python Must Be Crazy','A thrilling new Python Series', '/khsjha27hbs', 8.5, 129993)
def test_instance(self):
self.assertTrue(isinstance(self.new_movie, Movie))
| 25.526316 | 121 | 0.657732 |
e9f3fe9dbbb6d45bca7313f7c13ed042b208e497 | 8,310 | py | Python | sentry_sdk/integrations/logging.py | targhs/sentry-python | 50ddda7b40c2d09b853b3fa2d595438c608a7eb0 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/logging.py | targhs/sentry-python | 50ddda7b40c2d09b853b3fa2d595438c608a7eb0 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/logging.py | targhs/sentry-python | 50ddda7b40c2d09b853b3fa2d595438c608a7eb0 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import
import logging
import datetime
from fnmatch import fnmatch
from sentry_sdk.hub import Hub
from sentry_sdk.utils import (
to_string,
event_from_exception,
current_stacktrace,
capture_internal_exceptions,
)
from sentry_sdk.integrations import Integration
from sentry_sdk._compat import iteritems
from sentry_sdk._types import MYPY
if MYPY:
from logging import LogRecord
from typing import Any
from typing import Dict
from typing import Optional
DEFAULT_LEVEL = logging.INFO
DEFAULT_EVENT_LEVEL = logging.ERROR
# Capturing events from those loggers causes recursion errors. We cannot allow
# the user to unconditionally create events from those loggers under any
# circumstances.
#
# Note: Ignoring by logger name here is better than mucking with thread-locals.
# We do not necessarily know whether thread-locals work 100% correctly in the user's environment.
_IGNORED_LOGGERS = set(
["sentry_sdk.errors", "urllib3.connectionpool", "urllib3.connection"]
)
def ignore_logger(
name, # type: str
):
# type: (...) -> None
"""This disables recording (both in breadcrumbs and as events) calls to
a logger of a specific name. Among other uses, many of our integrations
use this to prevent their actions being recorded as breadcrumbs. Exposed
to users as a way to quiet spammy loggers.
:param name: The name of the logger to ignore (same string you would pass to ``logging.getLogger``).
"""
_IGNORED_LOGGERS.add(name)
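# Usage sketch (added for illustration; the DSN and logger name are placeholders):
#
# import logging
# import sentry_sdk
# from sentry_sdk.integrations.logging import LoggingIntegration, ignore_logger
#
# sentry_sdk.init(
#     dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#     integrations=[LoggingIntegration(level=logging.INFO, event_level=logging.ERROR)],
# )
# ignore_logger("a.spammy.logger")  # drop breadcrumbs and events from this logger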
class LoggingIntegration(Integration):
identifier = "logging"
def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):
# type: (Optional[int], Optional[int]) -> None
self._handler = None
self._breadcrumb_handler = None
if level is not None:
self._breadcrumb_handler = BreadcrumbHandler(level=level)
if event_level is not None:
self._handler = EventHandler(level=event_level)
def _handle_record(self, record):
# type: (LogRecord) -> None
if self._handler is not None and record.levelno >= self._handler.level:
self._handler.handle(record)
if (
self._breadcrumb_handler is not None
and record.levelno >= self._breadcrumb_handler.level
):
self._breadcrumb_handler.handle(record)
@staticmethod
def setup_once():
# type: () -> None
old_callhandlers = logging.Logger.callHandlers
def sentry_patched_callhandlers(self, record):
# type: (Any, LogRecord) -> Any
try:
return old_callhandlers(self, record)
finally:
# This check is done twice, once also here before we even get
# the integration. Otherwise we have a high chance of getting
# into a recursion error when the integration is resolved
# (this also is slower).
if record.name not in _IGNORED_LOGGERS:
integration = Hub.current.get_integration(LoggingIntegration)
if integration is not None:
integration._handle_record(record)
logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore
def _can_record(record):
# type: (LogRecord) -> bool
"""Prevents ignored loggers from recording"""
for logger in _IGNORED_LOGGERS:
if fnmatch(record.name, logger):
return False
return True
def _breadcrumb_from_record(record):
# type: (LogRecord) -> Dict[str, Any]
return {
"type": "log",
"level": _logging_to_event_level(record.levelname),
"category": record.name,
"message": record.message,
"timestamp": datetime.datetime.utcfromtimestamp(record.created),
"data": _extra_from_record(record),
}
def _logging_to_event_level(levelname):
# type: (str) -> str
return {"critical": "fatal"}.get(levelname.lower(), levelname.lower())
COMMON_RECORD_ATTRS = frozenset(
(
"args",
"created",
"exc_info",
"exc_text",
"filename",
"funcName",
"levelname",
"levelno",
"linenno",
"lineno",
"message",
"module",
"msecs",
"msg",
"name",
"pathname",
"process",
"processName",
"relativeCreated",
"stack",
"tags",
"thread",
"threadName",
"stack_info",
)
)
def _extra_from_record(record):
    # type: (LogRecord) -> Dict[str, Any]
return {
k: v
for k, v in iteritems(vars(record))
if k not in COMMON_RECORD_ATTRS
and (not isinstance(k, str) or not k.startswith("_"))
}
class EventHandler(logging.Handler, object):
"""
A logging handler that emits Sentry events for each log record
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
hub = Hub.current
if hub.client is None:
return
client_options = hub.client.options
# exc_info might be None or (None, None, None)
#
# exc_info may also be any falsy value due to Python stdlib being
# liberal with what it receives and Celery's billiard being "liberal"
# with what it sends. See
# https://github.com/getsentry/sentry-python/issues/904
if record.exc_info and record.exc_info[0] is not None:
event, hint = event_from_exception(
record.exc_info,
client_options=client_options,
mechanism={"type": "logging", "handled": True},
)
elif record.exc_info and record.exc_info[0] is None:
event = {}
hint = {}
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(
client_options["with_locals"]
),
"crashed": False,
"current": True,
}
]
}
else:
event = {}
hint = {}
hint["log_record"] = record
event["level"] = _logging_to_event_level(record.levelname)
event["logger"] = record.name
# Log records from `warnings` module as separate issues
        record_captured_from_warnings_module = (
            record.name == "py.warnings" and record.msg == "%s"
        )
        if record_captured_from_warnings_module:
# use the actual message and not "%s" as the message
# this prevents grouping all warnings under one "%s" issue
msg = record.args[0] # type: ignore
event["logentry"] = {
"message": msg,
"params": (),
}
else:
event["logentry"] = {
"message": to_string(record.msg),
"params": record.args,
}
event["extra"] = _extra_from_record(record)
hub.capture_event(event, hint=hint)
# Legacy name
SentryHandler = EventHandler
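# Manual wiring sketch (added; only relevant when the automatic integration is
# disabled — the logger name is an arbitrary example):
#
# handler = EventHandler(level=logging.ERROR)
# logging.getLogger("my.module").addHandler(handler)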
class BreadcrumbHandler(logging.Handler, object):
"""
A logging handler that records breadcrumbs for each log record.
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
Hub.current.add_breadcrumb(
_breadcrumb_from_record(record), hint={"log_record": record}
)
| 30.108696 | 110 | 0.596871 |
dad39531784e0a31650adaf82fdedb0ed0160601 | 81 | py | Python | rse/apps.py | willfurnass/RSEAdmin | 28df855e417dfb279505ae8d57b20b0eaf8ff55f | [
"MIT"
] | 13 | 2019-11-04T15:41:42.000Z | 2022-01-19T19:38:35.000Z | rse/apps.py | willfurnass/RSEAdmin | 28df855e417dfb279505ae8d57b20b0eaf8ff55f | [
"MIT"
] | 152 | 2019-07-15T15:37:31.000Z | 2022-02-09T10:50:58.000Z | rse/apps.py | willfurnass/RSEAdmin | 28df855e417dfb279505ae8d57b20b0eaf8ff55f | [
"MIT"
] | 3 | 2020-02-18T11:56:24.000Z | 2021-04-06T08:18:26.000Z | from django.apps import AppConfig
class RseConfig(AppConfig):
name = 'rse'
| 13.5 | 33 | 0.728395 |
53e52201c1934896690b7be806684fdec6283cd9 | 4,864 | py | Python | test/test_client_ip.py | leeyangjie/unit | 02f50533c4a476b91e4b39a7a2d052095d970983 | [
"Apache-2.0"
] | null | null | null | test/test_client_ip.py | leeyangjie/unit | 02f50533c4a476b91e4b39a7a2d052095d970983 | [
"Apache-2.0"
] | null | null | null | test/test_client_ip.py | leeyangjie/unit | 02f50533c4a476b91e4b39a7a2d052095d970983 | [
"Apache-2.0"
] | null | null | null | from unit.applications.lang.python import TestApplicationPython
class TestClientIP(TestApplicationPython):
prerequisites = {'modules': {'python': 'any'}}
def client_ip(self, options):
assert 'success' in self.conf(
{
"127.0.0.1:7081": {
"client_ip": options,
"pass": "applications/client_ip",
},
"[::1]:7082": {
"client_ip": options,
"pass": "applications/client_ip",
},
},
'listeners',
), 'listeners configure'
def get_xff(self, xff, sock_type='ipv4'):
port = 7081 if sock_type == 'ipv4' else 7082
return self.get(
sock_type=sock_type,
port=port,
headers={'Connection': 'close', 'X-Forwarded-For': xff},
)['body']
def setup_method(self):
self.load('client_ip')
def test_settings_client_ip_single_ip(self):
self.client_ip(
{'header': 'X-Forwarded-For', 'source': '123.123.123.123'}
)
assert self.get(port=7081)['body'] == '127.0.0.1', 'ipv4 default'
assert (
self.get(sock_type='ipv6', port=7082)['body'] == '::1'
), 'ipv6 default'
assert self.get_xff('1.1.1.1') == '127.0.0.1', 'bad source'
assert self.get_xff('blah') == '127.0.0.1', 'bad header'
assert self.get_xff('1.1.1.1', 'ipv6') == '::1', 'bad source ipv6'
self.client_ip({'header': 'X-Forwarded-For', 'source': '127.0.0.1'})
assert self.get(port=7081)['body'] == '127.0.0.1', 'ipv4 default 2'
assert (
self.get(sock_type='ipv6', port=7082)['body'] == '::1'
), 'ipv6 default 2'
assert self.get_xff('1.1.1.1') == '1.1.1.1', 'replace'
assert self.get_xff('blah') == '127.0.0.1', 'bad header 2'
assert self.get_xff('1.1.1.1', 'ipv6') == '::1', 'bad source ipv6 2'
self.client_ip({'header': 'X-Forwarded-For', 'source': '!127.0.0.1'})
assert self.get_xff('1.1.1.1') == '127.0.0.1', 'bad source 3'
assert self.get_xff('1.1.1.1', 'ipv6') == '1.1.1.1', 'replace 2'
def test_settings_client_ip_ipv4(self):
self.client_ip({'header': 'X-Forwarded-For', 'source': '127.0.0.1'})
assert (
self.get_xff('8.8.8.8, 84.23.23.11') == '84.23.23.11'
), 'xff replace'
assert (
self.get_xff('8.8.8.8, 84.23.23.11, 127.0.0.1') == '127.0.0.1'
), 'xff replace 2'
assert (
self.get_xff(['8.8.8.8', '127.0.0.1, 10.0.1.1']) == '10.0.1.1'
), 'xff replace multi'
def test_settings_client_ip_ipv6(self):
self.client_ip({'header': 'X-Forwarded-For', 'source': '::1'})
assert self.get_xff('1.1.1.1') == '127.0.0.1', 'bad source ipv4'
for ip in [
'f607:7403:1e4b:6c66:33b2:843f:2517:da27',
'2001:db8:3c4d:15::1a2f:1a2b',
'2001::3c4d:15:1a2f:1a2b',
'::11.22.33.44',
]:
assert self.get_xff(ip, 'ipv6') == ip, 'replace'
def test_settings_client_ip_recursive(self):
self.client_ip(
{
'header': 'X-Forwarded-For',
'recursive': True,
'source': ['127.0.0.1', '10.50.0.17', '10.5.2.1'],
}
)
assert self.get_xff('1.1.1.1') == '1.1.1.1', 'xff chain'
assert self.get_xff('1.1.1.1, 10.5.2.1') == '1.1.1.1', 'xff chain 2'
assert (
self.get_xff('8.8.8.8, 1.1.1.1, 10.5.2.1') == '1.1.1.1'
), 'xff chain 3'
assert (
self.get_xff('10.50.0.17, 10.5.2.1, 10.5.2.1') == '10.50.0.17'
), 'xff chain 4'
assert (
self.get_xff(['8.8.8.8', '1.1.1.1, 127.0.0.1']) == '1.1.1.1'
), 'xff replace multi'
assert (
self.get_xff(['8.8.8.8', '1.1.1.1, 127.0.0.1', '10.5.2.1'])
== '1.1.1.1'
), 'xff replace multi 2'
assert (
self.get_xff(['10.5.2.1', '10.50.0.17, 1.1.1.1', '10.5.2.1'])
== '1.1.1.1'
), 'xff replace multi 3'
assert (
self.get_xff('8.8.8.8, 2001:db8:3c4d:15::1a2f:1a2b, 127.0.0.1')
== '2001:db8:3c4d:15::1a2f:1a2b'
), 'xff chain ipv6'
def test_settings_client_ip_invalid(self):
assert 'error' in self.conf(
{
"http": {
"client_ip": {'header': 'X-Forwarded-For', 'source': []}
}
},
'settings',
), 'empty array source'
assert 'error' in self.conf(
{
"http": {
"client_ip": {'header': 'X-Forwarded-For', 'source': 'a'}
}
},
'settings',
), 'empty source invalid'
| 35.246377 | 77 | 0.476563 |