column               type            range
repository_name      stringlengths   7 - 107
function_path        stringlengths   4 - 190
function_identifier  stringlengths   1 - 236
language             stringclasses   1 value
function             stringlengths   9 - 647k
docstring            stringlengths   5 - 488k
function_url         stringlengths   71 - 285
context              stringlengths   0 - 2.51M
license              stringclasses   5 values
lostindarkmath/pedantic-python-decorators
pedantic/type_checking_logic/check_types.py
_get_base_generic
python
def _get_base_generic(cls: Any) -> Any:
    origin = cls.__origin__ if hasattr(cls, '__origin__') else None
    name = cls._name if hasattr(cls, '_name') else None

    if name is not None:
        return getattr(typing, name)
    elif origin is not None:
        return origin
    return cls
>>> from typing import List, Union, Tuple, Callable, Dict, Set
>>> _get_base_generic(List)
typing.List
>>> _get_base_generic(List[float])
typing.List
>>> _get_base_generic(List[List[float]])
typing.List
>>> _get_base_generic(List[Union[int, float]])
typing.List
>>> _get_base_generic(Tuple)
typing.Tuple
>>> _get_base_generic(Tuple[float, int])
typing.Tuple
>>> _get_base_generic(Tuple[Union[int, float], str])
typing.Tuple
>>> _get_base_generic(Callable[..., int])
typing.Callable
>>> _get_base_generic(Callable[[Union[int, str], float], int])
typing.Callable
>>> _get_base_generic(Dict)
typing.Dict
>>> _get_base_generic(Dict[str, str])
typing.Dict
>>> _get_base_generic(Union)
typing.Union
>>> _get_base_generic(Union[float, int, str])
typing.Union
>>> _get_base_generic(Set)
typing.Set
>>> _get_base_generic(Set[int])
typing.Set
https://github.com/lostindarkmath/pedantic-python-decorators/blob/66865a958a36440b48e790f22ea42d2beb725b16/pedantic/type_checking_logic/check_types.py#L413-L455
import inspect import typing from io import BytesIO, StringIO, BufferedWriter, TextIOWrapper from typing import Any, Dict, Iterable, ItemsView, Callable, Union, Optional, Tuple, Mapping, TypeVar, NewType import collections import sys from pedantic.constants import TypeVar as TypeVar_ from pedantic.exceptions import PedanticTypeCheckException, PedanticTypeVarMismatchException, PedanticException def _assert_value_matches_type(value: Any, type_: Any, err: str, type_vars: Dict[TypeVar_, Any], key: Optional[str] = None, msg: Optional[str] = None ) -> None: if not _check_type(value=value, type_=type_, err=err, type_vars=type_vars): t = type(value) value = f'{key}={value}' if key is not None else str(value) if not msg: msg = f'{err}Type hint is incorrect: Argument {value} of type {t} does not match expected type {type_}.' raise PedanticTypeCheckException(msg) def _check_type(value: Any, type_: Any, err: str, type_vars: Dict[TypeVar_, Any]) -> bool: if type_ is None: return value == type_ elif isinstance(type_, str): class_name = value.__class__.__name__ base_class_name = value.__class__.__base__.__name__ return class_name == type_ or base_class_name == type_ if isinstance(type_, tuple): raise PedanticTypeCheckException(f'{err}Use "Tuple[]" instead of "{type_}" as type hint.') if isinstance(type_, list): raise PedanticTypeCheckException(f'{err}Use "List[]" instead of "{type_}" as type hint.') if type_ is tuple: raise PedanticTypeCheckException(f'{err}Use "Tuple[]" instead of "tuple" as type hint.') if type_ is list: raise PedanticTypeCheckException(f'{err}Use "List[]" instead of "list" as type hint.') if type_ is dict: raise PedanticTypeCheckException(f'{err}Use "Dict[]" instead of "dict" as type hint.') if type_ is set: raise PedanticTypeCheckException(f'{err}Use "Set[]" instead of "set" as type hint.') if type_ is frozenset: raise PedanticTypeCheckException(f'{err}Use "FrozenSet[]" instead of "frozenset" as type hint.') if type_ is type: raise PedanticTypeCheckException(f'{err}Use "Type[]" instead of "type" as type hint.') try: return _is_instance(obj=value, type_=type_, type_vars=type_vars) except PedanticTypeCheckException as ex: raise PedanticTypeCheckException(f'{err} {ex}') except PedanticTypeVarMismatchException as ex: raise PedanticTypeVarMismatchException(f'{err} {ex}') except (AttributeError, Exception) as ex: raise PedanticTypeCheckException( f'{err}An error occurred during type hint checking. Value: {value} Annotation: ' f'{type_} Mostly this is caused by an incorrect type annotation. Details: {ex} ') def _is_instance(obj: Any, type_: Any, type_vars: Dict[TypeVar_, Any]) -> bool: if not _has_required_type_arguments(type_): raise PedanticTypeCheckException( f'The type annotation "{type_}" misses some type arguments e.g. 
' f'"typing.Tuple[Any, ...]" or "typing.Callable[..., str]".') if type_.__module__ == 'typing': if _is_generic(type_): origin = _get_base_generic(type_) else: origin = type_ name = _get_name(origin) if name in _SPECIAL_INSTANCE_CHECKERS: validator = _SPECIAL_INSTANCE_CHECKERS[name] return validator(obj, type_, type_vars) if type_ == typing.BinaryIO: return isinstance(obj, (BytesIO, BufferedWriter)) elif type_ == typing.TextIO: return isinstance(obj, (StringIO, TextIOWrapper)) if _is_generic(type_): python_type = type_.__origin__ if not isinstance(obj, python_type): return False base = _get_base_generic(type_) type_args = _get_type_arguments(cls=type_) if base in _ORIGIN_TYPE_CHECKERS: validator = _ORIGIN_TYPE_CHECKERS[base] return validator(obj, type_args, type_vars) assert base.__base__ == typing.Generic, f'Unknown base: {base}' return isinstance(obj, base) if isinstance(type_, TypeVar): constraints = type_.__constraints__ if len(constraints) > 0 and type(obj) not in constraints: return False if _is_forward_ref(type_=type_.__bound__): return type(obj).__name__ == type_.__bound__.__forward_arg__ if type_.__bound__ is not None and not isinstance(obj, type_.__bound__): return False if type_ in type_vars: other = type_vars[type_] if type_.__contravariant__: if not _is_subtype(sub_type=other, super_type=obj.__class__): raise PedanticTypeVarMismatchException( f'For TypeVar {type_} exists a type conflict: value {obj} has type {type(obj)} but TypeVar {type_} ' f'was previously matched to type {other}') else: if not _is_instance(obj=obj, type_=other, type_vars=type_vars): raise PedanticTypeVarMismatchException( f'For TypeVar {type_} exists a type conflict: value {obj} has type {type(obj)} but TypeVar {type_} ' f'was previously matched to type {other}') type_vars[type_] = type(obj) return True if _is_forward_ref(type_=type_): return type(obj).__name__ == type_.__forward_arg__ if _is_type_new_type(type_): return isinstance(obj, type_.__supertype__) if hasattr(obj, '_asdict'): if hasattr(type_, '_field_types'): field_types = type_._field_types elif hasattr(type_, '__annotations__'): field_types = type_.__annotations__ else: return False if not obj._asdict().keys() == field_types.keys(): return False return all([_is_instance(obj=obj._asdict()[k], type_=v, type_vars=type_vars) for k, v in field_types.items()]) return isinstance(obj, type_) def _is_forward_ref(type_: Any) -> bool: return hasattr(typing, 'ForwardRef') and isinstance(type_, typing.ForwardRef) or hasattr(typing, '_ForwardRef') and isinstance(type_, typing._ForwardRef) def _is_type_new_type(type_: Any) -> bool: return type_.__qualname__ == NewType('name', int).__qualname__ def _get_name(cls: Any) -> str: if hasattr(cls, '_name'): return cls._name elif hasattr(cls, '__name__'): return cls.__name__ else: return type(cls).__name__[1:] def _is_generic(cls: Any) -> bool: if hasattr(typing, '_SpecialGenericAlias') and isinstance(cls, typing._SpecialGenericAlias): return True elif hasattr(typing, '_GenericAlias'): if isinstance(cls, typing._GenericAlias): return True if isinstance(cls, typing._SpecialForm): return cls not in {Any} elif isinstance(cls, (typing.GenericMeta, typing._Union, typing._Optional, typing._ClassVar)): return True return False def _has_required_type_arguments(cls: Any) -> bool: base: str = _get_name(cls=cls) num_type_args = len(_get_type_arguments(cls=cls)) if base in NUM_OF_REQUIRED_TYPE_ARGS_EXACT: return NUM_OF_REQUIRED_TYPE_ARGS_EXACT[base] == num_type_args elif base in NUM_OF_REQUIRED_TYPE_ARGS_MIN: return 
NUM_OF_REQUIRED_TYPE_ARGS_MIN[base] <= num_type_args return True def _get_type_arguments(cls: Any) -> Tuple[Any, ...]: result = () if hasattr(cls, '__args__'): result = cls.__args__ origin = _get_base_generic(cls=cls) if origin != cls and ((origin is Callable) or (origin is collections.abc.Callable)) and result[0] is not Ellipsis: result = (list(result[:-1]), result[-1]) result = result or () return result if '[' in str(cls) else ()
Apache License 2.0
seung-lab/chunkflow
chunkflow/chunk/base.py
Chunk.ndoffset
python
def ndoffset(self) -> tuple:
    if self.ndim == 4:
        return (0, *self.voxel_offset)
    else:
        return self.voxel_offset
Make the voxel offset have the same dimension as the array.
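A minimal usage sketch (not part of the original record; it assumes the full Chunk class from chunkflow, since the context below is truncated, and that ndoffset is exposed as a property, as its use in Chunk.slices suggests):

import numpy as np
from chunkflow.chunk.base import Chunk

# a 3D chunk keeps its 3-component voxel offset unchanged
chunk3d = Chunk(np.zeros((4, 4, 4), dtype=np.uint8), voxel_offset=(1, 2, 3))
assert chunk3d.ndoffset == (1, 2, 3)

# a 4D chunk (channel, z, y, x) gets a leading 0 so the offset matches array.ndim
chunk4d = Chunk(np.zeros((2, 4, 4, 4), dtype=np.float32), voxel_offset=(1, 2, 3))
assert chunk4d.ndoffset == (0, 1, 2, 3)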
https://github.com/seung-lab/chunkflow/blob/0e032cdf4f2ba104af4f7809ac11df17352384ed/chunkflow/chunk/base.py#L395-L402
from typing import Union import os from numbers import Number import h5py import numpy as np import nrrd from numpy.core.numerictypes import issubdtype from numpy.lib.mixins import NDArrayOperatorsMixin from scipy.ndimage import gaussian_filter import tifffile import cc3d from cloudvolume.lib import yellow, Bbox from chunkflow.lib.bounding_boxes import BoundingBox from .validate import validate_by_template_matching class Chunk(NDArrayOperatorsMixin): def __init__(self, array: np.ndarray, voxel_offset: tuple = None, voxel_size: tuple = None): assert isinstance(array, np.ndarray) or isinstance(array, Chunk) self.array = array if voxel_offset is None: if isinstance(array, Chunk): self.array = array.array voxel_offset = array.voxel_offset else: voxel_offset = (0, 0, 0) if voxel_offset is not None: if len(voxel_offset) == 4: assert voxel_offset[0] == 0 voxel_offset = voxel_offset[1:] assert len(voxel_offset) == 3 self.voxel_offset = voxel_offset self.voxel_size = voxel_size if voxel_size is not None: assert len(voxel_size) == 3 assert np.alltrue([vs > 0 for vs in voxel_size]) assert array.ndim >= 3 and array.ndim <= 4 _HANDLED_TYPES = (np.ndarray, Number) @classmethod def from_array(cls, array: np.ndarray, bbox: BoundingBox, voxel_size: tuple = None): return cls(array, voxel_offset=bbox.minpt, voxel_size=voxel_size) @classmethod def from_bbox(cls, bbox: BoundingBox, dtype: type = np.uint8, voxel_size: tuple=None, all_zero: bool=False): assert isinstance(bbox, BoundingBox) size = bbox.maxpt - bbox.minpt return cls.create(size=size, dtype=dtype, voxel_offset=bbox.minpt, voxel_size=voxel_size, all_zero=all_zero) @classmethod def create(cls, size: tuple = (64, 64, 64), dtype: type = np.uint8, voxel_offset: tuple = (0, 0, 0), voxel_size: tuple = None, all_zero: bool = False): if isinstance(dtype, str): dtype = np.dtype(dtype) if all_zero: chunk = np.zeros(size, dtype=dtype) else: ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in size[-3:]], indexing='ij') chunk = np.abs(np.sin(4 * (ix + iy + iz))) if len(size) == 4: chunk = np.expand_dims(chunk, axis=0) chunk = np.repeat(chunk, size[0], axis=0) if np.dtype(dtype) == np.uint8: chunk = (chunk * 255).astype( dtype ) elif np.dtype(dtype) == np.uint32: chunk = (chunk>0.5).astype(dtype) elif np.issubdtype(dtype, np.floating): chunk = chunk.astype(dtype) else: raise NotImplementedError() return cls(chunk, voxel_offset=voxel_offset, voxel_size=voxel_size) def clone(self): return Chunk(self.array.copy(), voxel_offset=self.voxel_offset, voxel_size=self.voxel_size) @classmethod def from_nrrd(cls, file_name: str, voxel_offset: tuple=None, dtype: str = None, voxel_size: tuple=None): arr, _ = nrrd.read(file_name) if dtype: arr = arr.astype(dtype) return cls(arr, voxel_offset=voxel_offset, voxel_size=voxel_size) def to_nrrd(self, file_name: str=None): if file_name is None: file_name = f'{self.bbox.to_filename()}.nrrd' elif not file_name.endswith('.nrrd'): file_name += f'_{self.bbox.to_filename()}.nrrd' print('write chunk to file: ', file_name) nrrd.write(file_name, self.array) @classmethod def from_tif(cls, file_name: str, voxel_offset: tuple=None, dtype: str = None, voxel_size: tuple=None): arr = tifffile.imread(file_name) if dtype: arr = arr.astype(dtype) print(f'read tif chunk with size of {arr.shape}, voxel offset: {voxel_offset}, voxel size: {voxel_size}') return cls(arr, voxel_offset=voxel_offset, voxel_size=voxel_size) def to_tif(self, file_name: str=None): if file_name is None: file_name = f'{self.bbox.to_filename()}.tif' print('write chunk to file: 
', file_name) if self.array.dtype==np.float32: print(yellow('transforming data type from float32 to uint8')) img = self.array*255 img = img.astype( np.uint8 ) else: img = self.array tifffile.imwrite(file_name, data=img) @classmethod def from_h5(cls, file_name: str, voxel_offset: tuple=None, dataset_path: str = None, voxel_size: tuple = None, cutout_start: tuple = None, cutout_stop: tuple = None, cutout_size: tuple = None, zero_filling: bool = False, dtype: str = None): assert os.path.exists(file_name) if cutout_start is not None and cutout_size is not None: cutout_stop = tuple(t+s for t, s in zip(cutout_start, cutout_size)) if not h5py.is_hdf5(file_name): assert cutout_start is not None assert cutout_stop is not None bbox = BoundingBox.from_list([*cutout_start, *cutout_stop]) file_name += f'{bbox.to_filename()}.h5' if not os.path.exists(file_name) and zero_filling: assert dtype is not None print(f'file do not exist, will fill with zero: {file_name}') return cls.from_bbox(bbox, dtype=dtype, voxel_size=voxel_size, all_zero=True) with h5py.File(file_name, 'r') as f: if dataset_path is None: for key in f.keys(): if 'offset' not in key and 'unique' not in key: dataset_path = key break dset = f[dataset_path] if voxel_offset is None: if 'voxel_offset' in f: voxel_offset = tuple(f['voxel_offset']) else: voxel_offset = (0, 0, 0) if voxel_size is None: if 'voxel_size' in f: voxel_size = tuple(f['voxel_size']) else: voxel_size = (1, 1, 1) if cutout_start is None: cutout_start = voxel_offset if cutout_size is None: cutout_size = dset.shape[-3:] if cutout_stop is None: cutout_stop = tuple(t+s for t, s in zip(cutout_start, cutout_size)) for c, v in zip(cutout_start, voxel_offset): assert c >= v, "can only cutout after the global voxel offset." assert len(cutout_start) == 3 assert len(cutout_stop) == 3 dset = dset[..., cutout_start[0]-voxel_offset[0]:cutout_stop[0]-voxel_offset[0], cutout_start[1]-voxel_offset[1]:cutout_stop[1]-voxel_offset[1], cutout_start[2]-voxel_offset[2]:cutout_stop[2]-voxel_offset[2], ] print(f"""read from HDF5 file: {file_name} and start with {cutout_start}, \ ends with {cutout_stop}, size is {cutout_size}, voxel size is {voxel_size}.""") arr = np.asarray(dset) if arr.dtype == np.dtype('<f4'): arr = arr.astype('float32') elif arr.dtype == np.dtype('<f8'): arr = arr.astype('float64') print('new chunk voxel offset: {}'.format(cutout_start)) return cls(arr, voxel_offset=cutout_start, voxel_size=voxel_size) def to_h5(self, file_name: str, with_offset: bool=True, chunk_size: tuple=(64,64,64), with_unique: bool= True, compression="gzip", voxel_size: tuple = None): if chunk_size: assert len(chunk_size) == 3 if not file_name.endswith('.h5'): file_name += self.bbox.to_filename() + '.h5' print('write chunk to file: ', file_name) if os.path.exists(file_name): print(yellow(f'deleting existing file: {file_name}')) os.remove(file_name) with h5py.File(file_name, 'w') as f: f.create_dataset('/main', data=self.array, chunks=chunk_size, compression=compression) if voxel_size is None and self.voxel_size is not None: voxel_size = self.voxel_size if voxel_size is not None: f.create_dataset('/voxel_size', data=voxel_size) if with_offset and self.voxel_offset is not None: f.create_dataset('/voxel_offset', data=self.voxel_offset) if with_unique and self.is_segmentation: unique = np.unique(self.array) if unique[0]: unique = unique[1:] f.create_dataset('/unique_nonzeros', data = unique) return file_name def __array__(self): return self.array def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out 
= kwargs.get('out', ()) for x in inputs + out: if not isinstance(x, self._HANDLED_TYPES + (Chunk,)): return NotImplemented inputs = tuple(x.array if isinstance(x, Chunk) else x for x in inputs) if out: kwargs['out'] = tuple( x.array if isinstance(x, Chunk) else x for x in out) result = getattr(ufunc, method)(*inputs, **kwargs) if type(result) is tuple: return tuple(type(self)(x, voxel_offset=self.voxel_offset, voxel_size=self.voxel_size) for x in result) elif method == 'at': return None elif isinstance(result, Number): return result elif isinstance(result, np.ndarray): return type(self)(result, voxel_offset=self.voxel_offset, voxel_size=self.voxel_size) else: return result def __getitem__(self, index): return self.array[index] def __setitem__(self, key, value): self.array[key] = value def __repr__(self): return f'array: {self.array}\n voxel offset: {self.voxel_offset} \n voxel size: {self.voxel_size}' def __eq__(self, value): if isinstance(value, type(self)): return np.array_equal(self.array, value.array) and np.array_equal( self.voxel_offset, value.voxel_offset) elif isinstance(value, Number): return np.all(self.array==value) elif isinstance(value, np.ndarray): return np.all(self.array == value) else: raise NotImplementedError def set_properties(self, properties: dict): if 'voxel_offset' in properties: self.voxel_offset = properties['voxel_offset'] if 'voxel_size' in properties: self.voxel_size = properties['voxel_size'] @property def properties(self) -> dict: props = dict() if self.voxel_offset is not None or self.voxel_offset != (0, 0, 0): props['voxel_offset'] = self.voxel_offset if self.voxel_size is not None or self.voxel_size != (1, 1, 1): props['voxel_size'] = self.voxel_size return props @property def slices(self) -> tuple: return tuple( slice(o, o + s) for o, s in zip(self.ndoffset, self.shape)) @property def is_image(self) -> bool: return self.array.ndim == 3 and self.array.dtype == np.uint8 @property def is_segmentation(self) -> bool: return self.array.ndim == 3 and (np.issubdtype( self.array.dtype, np.integer) or np.issubdtype( self.dtype, np.bool8)) and self.array.dtype != np.uint8 @property def is_affinity_map(self) -> bool: return self.array.ndim == 4 and self.shape[0] == 3 and self.array.dtype == np.float32 @property def is_probability_map(self) -> bool: return self.array.ndim == 4 and self.array.dtype == np.float32 @property
Apache License 2.0
twisted/axiom
axiom/tags.py
Catalog.tagNames
python
def tagNames(self):
    return self.store.query(
        _TagName, _TagName.catalog == self).getColumn("name")
Return an iterator of unicode strings: the unique tag names which have been applied to objects in this catalog.
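An illustrative sketch of how tagNames might be used (the Note item type is hypothetical; it assumes the usual Axiom Store/Item API alongside the Catalog shown in the context below):

from axiom.store import Store
from axiom.item import Item
from axiom.attributes import text
from axiom.tags import Catalog

class Note(Item):
    typeName = 'example_note'   # hypothetical item type for illustration
    schemaVersion = 1
    body = text()

store = Store()                 # in-memory Axiom store
catalog = Catalog(store=store)
note = Note(store=store, body=u'hello')

catalog.tag(note, u'example')
catalog.tag(note, u'sample')

# tagNames() yields each distinct tag name applied in this catalog
print(sorted(catalog.tagNames()))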
https://github.com/twisted/axiom/blob/28191ede99287e9a87c1ff561b831f7d80aaa2fe/axiom/tags.py#L83-L88
from epsilon.extime import Time from axiom.item import Item from axiom.attributes import text, reference, integer, AND, timestamp class Tag(Item): typeName = 'tag' schemaVersion = 1 name = text(doc=""" The short string which is being applied as a tag to an Item. """) created = timestamp(doc=""" When this tag was applied to the Item to which it applies. """) object = reference(doc=""" The Item to which this tag applies. """) catalog = reference(doc=""" The L{Catalog} item in which this tag was created. """) tagger = reference(doc=""" An optional reference to the Item which is responsible for this tag's existence. """) class _TagName(Item): typeName = 'tagname' name = text(doc=""" The short string which uniquely represents this tag. """, indexed=True) catalog = reference(doc=""" The L{Catalog} item in which this tag exists. """) class Catalog(Item): typeName = 'tag_catalog' schemaVersion = 2 tagCount = integer(default=0) def tag(self, obj, tagName, tagger=None): if self.store.findFirst(Tag, AND(Tag.object == obj, Tag.name == tagName, Tag.catalog == self)): return self.store.findOrCreate(_TagName, name=tagName, catalog=self) self.tagCount += 1 Tag(store=self.store, object=obj, name=tagName, catalog=self, created=Time(), tagger=tagger)
MIT License
fredhutch/proxmox-tools
prox/cmdprox.py
ssh_exec
python
def ssh_exec(user, pwd, commands, host):
    if not isinstance(commands, list):
        print('commands parameter in ssh_exec needs to be a list')
        return False
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username=user, password=pwd)
    for command in commands:
        stdin, stdout, stderr = ssh.exec_command(command)
        for line in stdout.readlines():
            print(line.strip())
Execute a list of commands via SSH.
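A brief usage sketch (hostname and credentials are placeholders; ssh_exec lives in prox/cmdprox.py and is assumed to be in scope):

# placeholders for illustration only
host = 'node1.example.org'
user = 'root'
pwd = 'secret'

# run two commands on the remote host; stdout of each is printed line by line
ssh_exec(user, pwd, ['hostname', 'uptime'], host)

# passing a bare string instead of a list is rejected and returns False
assert ssh_exec(user, pwd, 'hostname', host) is False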
https://github.com/fredhutch/proxmox-tools/blob/cfd4d7333969d3ad8af80f15be56d0d5052fee4e/prox/cmdprox.py#L949-L961
import sys, os, subprocess, re, platform, getpass, argparse, logging, hostlist import time, warnings, functools, random, json, requests, paramiko, socket try: import easygui except: pass with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) try: from .pyproxmox import * except: from pyproxmox import * logging.basicConfig(level=logging.WARNING) __app__ = 'Proxmox command line deployment tool' PROXHOST = os.getenv('PPROXHOST', 'proxa1.fhcrc.org') REALM = os.getenv('PREALM', 'FHCRC.ORG') LXCIMAGE = os.getenv('PLXCIMAGE', 'proxnfs:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz') STORLOC = os.getenv('PSTORLOC', 'proxZFS') STORNET = os.getenv('PSTORNET', 'proxnfs') USERDB = os.getenv('PUSERDB', 'https://toolbox.fhcrc.org/json/sc_users.json') EXCLUDEHOSTS = ['proxa5'] CHEFVERSION = '12.19.36' homedir = os.path.expanduser("~") def parse_arguments(): parser = argparse.ArgumentParser(prog='prox ', description='a tool for deploying resources from proxmox ' + '(LXC containers or VMs)') parser.add_argument( '--debug', '-g', dest='debug', action='store_true', default=False, help="verbose output for all commands") subparsers = parser.add_subparsers(dest="subcommand", help='sub-command help') parser_ssh = subparsers.add_parser('assist', aliases=['gui'], help='navigate application via GUI (experimental)') parser_ssh = subparsers.add_parser('ssh', aliases=['connect'], help='connect to first host via ssh') parser_ssh.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox ssh host1 host2 host3') parser_list = subparsers.add_parser('list', aliases=['ls', 'show'], help='list hosts(s) with status, size and contact (optional)') parser_list.add_argument( '--all', '-a', dest='all', action='store_true', default=False, help="show all hosts (LXC and KVM)") parser_list.add_argument( '--contacts', '-c', dest='contacts', action='store_true', default=False, help="show the technical contact / owner of the machine") parser_list.add_argument( '--snapshots', '-s', dest='listsnap', action='store_true', default=False, help="list machine snapshots that can be rolled back") parser_list.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox modify host1 host2 host3') parser_start = subparsers.add_parser('start', aliases=['run'], help='start the host(s)') parser_start.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox start host1 host2 host3') parser_stop = subparsers.add_parser('stop', aliases=['shutdown'], help='stop the host(s)') parser_stop.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox stop host1 host2 host3') parser_destroy = subparsers.add_parser('destroy', aliases=['delete', 'rm'], help='delete the hosts(s) from disk') parser_destroy.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox destroy host1 host2 host3') parser_modify = subparsers.add_parser('modify', aliases=['mod'], help='modify the config of one or more hosts') parser_modify.add_argument('--mem', '-m', dest='mem', action='store', default='0', help='Memory allocation for the machine, e.g. 
4G or 512') parser_modify.add_argument('--disk', '-d', dest='disk', action='store', default='0', help='disk storage allocated to the machine.') parser_modify.add_argument('--cores', '-c', dest='cores', action='store', default='0', help='Number of cores to be allocated for the machine.') parser_modify.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox modify host1 host2 host3') parser_snap = subparsers.add_parser('snap', aliases=['snapshot'], help='take a snapshot of the host') parser_snap.add_argument('--description', '-d', dest='snapdesc', action='store', default='', help='description of the snapshot') parser_snap.add_argument('snapname', action='store', help='name of the snapshot') parser_snap.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox snap host1 host2 host3') parser_rollback = subparsers.add_parser('rollback', aliases=['rb'], help='roll back a snapshot') parser_rollback.add_argument('snapname', action='store', help='name of the snapshot') parser_rollback.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox snap host1 host2 host3') parser_new = subparsers.add_parser('new', aliases=['create'], help='create one or more new hosts') parser_new.add_argument('--runlist', '-r', dest='runlist', action='store', default='', help='a local shell script file or a command to execute after install') parser_new.add_argument('--node', '-N', dest='node', action='store', default='', help='Hostname of Proxmox node that will be used for install') parser_new.add_argument('--mem', '-m', dest='mem', action='store', default='512', help='Memory allocation for the machine, e.g. 4G or 512 Default: 512') parser_new.add_argument('--disk', '-d', dest='disk', action='store', default='4', help='disk storage allocated to the machine. Default: 4') parser_new.add_argument('--cores', '-c', dest='cores', action='store', default='2', help='Number of cores to be allocated for the machine. 
Default: 2') parser_new.add_argument('--ubuntu', '-u', dest='ubuntu', action='store', default='', help='Ubuntu version: 14.04, 16.04, 17.10 or 18.04') parser_new.add_argument( '--store-net', '-s', dest='stornet', action='store_true', default=False, help="use networked storage with backup (nfs, ceph) instead of local storage") parser_new.add_argument( '--docker', '-o', dest='docker', action='store_true', default=False, help="install latest docker-ce on new machine") parser_new.add_argument( '--bootstrap', '-b', dest='bootstrap', action='store_true', default=False, help="auto-configure the system using Chef.") parser_new.add_argument( '--no-bootstrap', '-n', dest='nobootstrap', action='store_true', default=False, help="do not auto-configure the system using Chef.") parser_new.add_argument('hosts', action='store', default=[], nargs='*', help='hostname(s) of VM/containers (separated by space), ' + ' example: prox new host1 host2 host3') return parser.parse_args() def main(): uselxc = True usegui = False user = getpass.getuser() if not args.subcommand: print('usage: prox <command> [options] host1 host2 host3') print(' Please run "prox --help"') return False if args.subcommand == 'assist': if 'DISPLAY' in os.environ.keys() or sys.platform == 'win32': usegui = True if args.debug: print('Debugging ....') print(args, l) if args.subcommand in ['straaange', 'oppptions']: prn("This feature is not yet implemented.", usegui) return False check_ssh_agent() check_ssh_auth(user) pwd = os.getenv('proxpw', '') if pwd == '': pwd = os.getenv('PROXPW', '') if pwd == '': pwd = getpwd("Password for '%s':" % user, usegui) if pwd == '': return False loginname = user + '@' + REALM if user == 'root': loginname = user + '@pam' if args.subcommand in ['ssh', 'connect']: ret = subprocess.run("ssh -i %s/.ssh/id_rsa_prox %s" % (homedir, args.hosts[0]), shell=True) return True a = prox_auth(PROXHOST, loginname, pwd, True) if a.ticket is None: prn('Could not get an authentication ticket. 
Wrong password?', usegui) return False p = pyproxmox(a) pool = p.getPools()['data'][0]['poolid'] nodelist = p.getNodes()['data'] nodes = [] hosttempl = {} templlist = [] ourmachines = {} oursnaps = {} if args.subcommand in ['list', 'ls', 'show']: if args.contacts or args.listsnap: prn("please wait ...") for n in nodelist: node = n['node'] if node in EXCLUDEHOSTS: continue nodes.append(node) try: conts = p.getContainers(node)['data'] except: continue for c in conts: descr = '' if args.subcommand in ['list', 'ls', 'show']: if args.contacts: descr = parse_contact(p,node,c['vmid']) if args.listsnap: shots = p.getContainerSnapshots(node,c['vmid'])['data'] oursnaps[int(c['vmid'])] = shots ourmachines[int(c['vmid'])] = [c['vmid'], c[ 'name'], c['type'], c['status'], node, int(c['maxmem'])/ 1024/1024/1024, c['cpus'], int(c['maxdisk'])/1024/1024/1024, descr] if args.subcommand in ['list', 'ls', 'show']: if args.all == True: vms = p.getNodeVirtualIndex(node)['data'] for v in vms: if args.contacts: descr = parse_contact_vm(p,node,v['vmid']) if v['template'] == 1: hosttempl[v['name']] = [node, v['vmid']] templlist.append(v['name']) else: ourmachines[int(v['vmid'])] = [v['vmid'], v[ 'name'], 'kvm', v['status'], node, '', '', 0, descr] vmids = [] if args.hosts != []: vmids = getvmids(ourmachines, args.hosts) print('') if args.subcommand in ['list', 'ls', 'show'] or ( args.subcommand in [ 'start', 'stop', 'destroy', 'modify', 'mod'] and not vmids): prn(' {0: <5} {1: <20} {2: <5} {3: <9} {4: <8} {5: <5} {6: <3} {7: <5} {8: <10}'.format( 'vmid', 'name', 'type', 'status', 'node' , 'mem', 'cpu', 'disk', '')) prn(' {0: <5} {1: <20} {2: <5} {3: <9} {4: <8} {5: <5} {6: <3} {7: <5} {8: <10}'.format( '----', '--------------------', '----', '--------', '-------', '-----', '---', '-----', '')) recip = [] for k, v in sorted(ourmachines.items()): prn(' {0: <5} {1: <20.20} {2: <5} {3: <9} {4: <8} {5: <5} {6: <3} {7: <5.0f} {8: <10}'.format(*v)) recip.append(v[-1]) if args.subcommand in ['list', 'ls', 'show']: if args.listsnap and k in oursnaps.keys(): for snap in oursnaps[k]: sparent = '' sdescription = '' if 'parent' in snap.keys(): sparent = snap['parent'] if 'description' in snap.keys(): sdescription = snap['description'] sdescription = sdescription.replace('\n', ' ') if snap['name'] != 'current': prn(' snapshot: {:<15} parent: {:<15} descr: {:<25} {:<10}'.format( snap['name'] , sparent, sdescription, '')) if args.subcommand in ['list', 'ls', 'show']: if args.contacts: recip = filter(None,uniq(recip)) prn("\nContact list: " + '; '.join(recip)) if args.subcommand in ['assist', 'gui']: if not usegui: print('running "prox assist" command which will guide you ' 'through a number of choices, however no GUI is available') return False chce = [] msg = ("Running 'prox assist'! Please select from the list " "below or 'Cancel' and run 'prox --help' for other options. " "Example: 'prox new mybox1 mybox2 mybox3' will create " "3 Linux machines.") chce = easygui.choicebox(msg, __app__,['New linux machine', 'New docker host', 'New virtual machine', 'List machines', 'Start machine', 'Stop machine', 'Modify machine', 'Destroy machine']) if not chce: return False if chce.startswith('New '): args.subcommand = 'new' if chce != "New linux machine": uselxc = False else: msg = ("Please select the size of your machine. " "Memory sizes are in MB, unless you add G " "(e.g. 1G). Disk sizes are always in GB\n." "Please start small, you can always resize." 
) title = "Configuring Machine Size" fieldNames = ["Memory", "# Cores", "Disk Size"] fieldValues = ['512M', '2', '4G'] fieldValues = easygui.multenterbox(msg, title, fieldNames, fieldValues) if fieldValues: args.mem, args.cores, args.disk = fieldValues else: return False elif chce.startswith('List '): args.subcommand = 'list' elif chce.startswith('Start '): args.subcommand = 'start' elif chce.startswith('Stop '): args.subcommand = 'stop' elif chce.startswith('Modify '): args.subcommand = 'modify' elif chce.startswith('Destroy '): args.subcommand = 'destroy' else: args.subcommand = 'assist' if args.subcommand in ['new', 'create', 'modify', 'mod', 'assist', 'gui']: lxccores = re.sub("[^0-9^.]", "", args.cores) lxcdisk = int(re.sub("[^0-9^.]", "", args.disk)) lxcmem = int(re.sub("[^0-9^.]", "", args.mem)) if "G" in args.mem.upper() or lxcmem <= 64: lxcmem = lxcmem*1024 if args.subcommand in ['start', 'run']: if not vmids: vmids.append(input('\nenter vmid to start:')) if vmids[-1] == '': prn('vmid is required', usegui) return False start_machines(p, ourmachines, vmids, usegui=False) pingwait(ourmachines[vmids[0]][1],1) if args.subcommand in ['stop', 'shutdown']: if not vmids: vmids.append(input('\nnot found, enter vmid to stop:')) if vmids[-1] == '': prn("no vmid entered", usegui) return False for vmid in vmids: machine = ourmachines[vmid] if machine[3] == 'stopped': prn('Machine "%s" is already stopped!' % machine[1], usegui) continue if machine[2] == 'kvm': ret = p.stopVirtualMachine(machine[4], vmid)['data'] if ret: print(ret) else: prn("host with id %s not yet stopped!" % vmid, usegui) for i in range(15): time.sleep(1) ret = p.getVirtualStatus(machine[4], vmid)['data'] prn( 'Machine {0: <4}: {1}, cpu: {2:.0%} '.format( vmid, ret['status'], ret['cpu'])) if ret['status'] == 'stopped': break else: ret = p.stopLXCContainer(machine[4], vmid)['data'] print(ret) if args.subcommand in ['modify', 'mod']: if not vmids: vmids.append(int(input('\nnot found, enter vmid to modify:'))) if vmids[-1] == '': prn("no vmid entered", usegui) return False for vmid in vmids: machine = ourmachines[vmid] if machine[2] == 'kvm': prn("currently cannot modify virtual machines.", usegui) else: ccfg = p.getContainerConfig(machine[4], vmid)['data'] rootstr=ccfg['rootfs'] post_data = {} post_data2 = {} if ccfg['cpulimit'] != lxccores and lxccores != '0': post_data['cpulimit'] = lxccores if ccfg['memory'] != lxcmem and lxcmem > 0: post_data['memory'] = lxcmem if machine[3] == 'stopped': if lxcdisk > 0: post_data['rootfs'] = re.sub(r",size=[0-9]+G", ",size=%sG" % lxcdisk, rootstr) else: post_data2 = {} if lxcdisk > 0: post_data2['disk'] = 'rootfs' post_data2['size'] = '%sG' % lxcdisk ret = p.resizeLXCContainer(machine[4], vmid, post_data2)['data'] if iserr(ret,400): prn ('Error 40X, could not resize disk. ' 'You may need to shutdown the machine to resize a disk', usegui) elif iserr(ret,500): prn ('Error 50X, could not resize disk', usegui) else: pass if post_data != {}: ret = p.setLXCContainerOptions(machine[4], vmid, post_data)['data'] if iserr(ret,400): prn ('Error 40X, could not set machine options', usegui) elif iserr(ret,500): prn ('Error 50X, could not set machine options', usegui) if post_data != {} or post_data2 != {}: ret = p.getContainerConfig(machine[4], vmid)['data'] print ('Machine reconfigured. 
New settings ' 'cores: %s, mem: %s MB, rootfs: %s ' % (ret['cpulimit'], ret['memory'], ret['rootfs']) ) else: prn('No changes made', usegui) if args.subcommand in ['destroy', 'delete']: if not vmids: vmids.append(input('\nnot found, enter vmid to destroy:')) if vmids[-1] == '': return False for vmid in vmids: if not int(vmid) in ourmachines: prn('machine with id %s does not exist' % vmid) return False machine = ourmachines[vmid] if machine[3] != 'stopped': print( 'Machine "%s" needs to be stopped before it can be destroyed!' % machine[1]) continue if machine[2] == 'kvm': ret = p.deleteVirtualMachine(machine[4], vmid)['data'] print(ret) else: ret = p.deleteLXCContainer(machine[4], vmid)['data'] print(ret) hip = '127.0.0.1' try: hip = socket.gethostbyname(machine[1]) except: pass ret = subprocess.run("ssh-keygen -R %s,%s > /dev/null 2>&1" % (machine[1], hip), shell=True) if args.subcommand in ['snap', 'snapshot']: if not vmids: vmids.append(input('\nnot found, enter vmid to snapshot:')) if vmids[-1] == '': return False for vmid in vmids: if not int(vmid) in ourmachines: prn('machine with id %s does not exist' % vmid) return False machine = ourmachines[vmid] if machine[2] == 'kvm': print('KVM machines are currently not supported') continue else: post_data = { 'description': args.snapdesc, 'snapname': args.snapname} ret = p.snapshotLXCContainer(machine[4],vmid,post_data)['data'] print(ret) if args.subcommand in ['rollback', 'rb']: if not vmids: vmids.append(input('\nnot found, enter vmid to snapshot:')) if vmids[-1] == '': return False for vmid in vmids: if not int(vmid) in ourmachines: prn('machine with id %s does not exist' % vmid) return False machine = ourmachines[vmid] if machine[2] == 'kvm': print('KVM machines are currently not supported') continue else: post_data = { 'snapname': args.snapname} ret = p.rollbackSnapshotLXCContainer(machine[4],vmid,args.snapname)['data'] print(ret) if args.subcommand in ['new', 'create', 'make']: myhosts = hostdedupe(ourmachines, args.hosts) if len(myhosts) == 0: msg=("enter the hostname(s) you want to deploy (separated by " "space, no domain name): ") myhosts = def_input(msg, usegui) myhosts = myhosts.split(' ') if not myhosts or myhosts == '': prn('hostname(s) are required', usegui) return False desc = 'testing' if len(args.hosts) == 0: msg=("What is the description/purpose of the system(s)? (e.g. " "testing, development, other") desc = def_input(msg, 'testing', usegui) storage = STORLOC if len(args.hosts) == 0: if yn_choice( "Do you want to use local storage on host (for better performance) ?") == 'n': storage = STORNET if args.stornet: storage = STORNET newhostids = [] if uselxc: newcontid = 0 for h in myhosts: if hostexists(h): if not yn_choice('Host "%s" already exists in DNS. ' 'This hostname will not be used. Do you still ' 'want to continue?' % h, default='n'): return False if args.node == '': mynode = random.choice(nodes) else: mynode = args.node print('installing container on node "%s" !!! 
' % mynode) oldcontid = newcontid for i in range(10): newcontid = p.getClusterVmNextId()['data'] if oldcontid != newcontid: break time.sleep(1) prn( 'creating host %s with ID %s in pool %s' % (h, newcontid, pool)) try: mydummy = LXCIMAGE except: LXCIMAGE = 'proxnfs:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz' if args.ubuntu == '14.04': LXCIMAGE = 'proxnfs:vztmpl/ubuntu-14.04-standard_14.04-1_amd64.tar.gz' if args.ubuntu == '16.04': LXCIMAGE = 'proxnfs:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz' elif args.ubuntu == '17.10': LXCIMAGE = 'proxnfs:vztmpl/ubuntu-17.10-standard_17.10-1_amd64.tar.gz' elif args.ubuntu == '18.04': LXCIMAGE = 'proxnfs:vztmpl/ubuntu-18.04-standard_18.04-1_amd64.tar.gz' post_data = { 'ostemplate': LXCIMAGE, 'cpulimit': lxccores, 'memory': lxcmem, 'rootfs': lxcdisk, 'vmid': newcontid, 'description': build_notes(user, pool, desc), 'hostname': h, 'password': pwd, 'storage': storage, 'pool': pool, 'net0': 'name=eth0,bridge=vmbr0,ip=dhcp'} ret = p.createLXCContainer(mynode, post_data)['data'] print(' ...%s' % ret) newhostids.append(int(newcontid)) ourmachines[int(newcontid)] = [newcontid, h, 'lxc', 'stopped', mynode] start_machines(p, ourmachines, newhostids, usegui=False) pingwait(myhosts[-1],1) idrsapub = '' if os.path.exists('%s/.ssh/id_rsa_prox.pub' % homedir): idrsapub = '%s/.ssh/id_rsa_prox.pub' % homedir for h in myhosts: if idrsapub != '': ssh_exec('root', pwd, ['mkdir -p .ssh',], h) sftp_put('root', pwd, idrsapub, '.ssh/id_rsa_prox.pub', h) ssh_exec('root', pwd, ['cat .ssh/id_rsa_prox.pub >> .ssh/authorized_keys',], h) ssh_exec('root', pwd, ['echo "session required pam_mkhomedir.so skel=/etc/skel/ umask=0022" >> /etc/pam.d/common-account',], h) ssh_exec('root', pwd, ['echo "%s ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers.d/zz_%s' % (user, user), 'chmod 440 /etc/sudoers.d/%s' % user], h) hip = '127.0.0.1' try: hip = socket.gethostbyname(h) except: pass ret = subprocess.run("ssh-keygen -R %s,%s > /dev/null 2>&1" % (h, hip), shell=True) ret = subprocess.run("ssh-keyscan -t rsa %s >> %s/.ssh/known_hosts 2>/dev/null" % (h, homedir), shell=True) if args.docker: print('\ninstalling docker....') install_docker(pwd, h) print ('\nfixing docker and restarting services...') fixcmds = ['sed -i "s/^ExecStartPre=\/sbin\/modprobe overlay/ExecStartPre=-\/sbin\/modprobe overlay/" /lib/systemd/system/containerd.service'] fixcmds.append('systemctl daemon-reload') fixcmds.append('systemctl restart containerd docker') ssh_exec('root', pwd, fixcmds, h) loginuser='root@' dobootstrap = False if args.bootstrap: dobootstrap = True elif args.nobootstrap: dobootstrap = False else: if yn_choice("\nDo you want to install the SciComp base config (e.g. 
user login) ?"): dobootstrap = True if dobootstrap: loginuser='' if os.path.exists('%s/.chef' % homedir): ret = easy_par(run_chef_knife, myhosts) else: func = functools.partial(run_chef_client, pwd) ret = easy_par(func, myhosts) if idrsapub != '': for h in myhosts: ssh_exec(user, pwd, ['mkdir -p .ssh',], h) sftp_put(user, pwd, idrsapub, '.ssh/id_rsa_prox.pub', h) ssh_exec(user, pwd, ['cat .ssh/id_rsa_prox.pub >> .ssh/authorized_keys',], h) else: run_chef_knife('hostname') if args.runlist != '': func = functools.partial(runlist_exec, pwd) ret = easy_par(func, myhosts) prn("**** login: ssh %s%s" % (loginuser,myhosts[0])) ret = subprocess.run("ssh %s%s" % (loginuser, myhosts[0]), shell=True) else: myimage = args.image if myimage == '': if not usegui: msg="Please enter a template name" myimage = def_input(msg, ','.join(templlist)) else: msg=("Please enter a template name or just hit enter " "to select from a list:") myimage = easygui.choicebox(msg, __app__, ','.join(templlist)) if myimage == ','.join(templlist) and usegui: myimage = easygui.choicebox( 'You must select a image or template name', __app__, templlist) if not myimage or myimage == ','.join(templlist) or myimage == '': prn('image is required') return False notes = build_notes(user, pool) for h in myhosts: newvmid = p.getClusterVmNextId()['data'] prn( 'creating host %s with VM ID %s in pool %s' % (h, newvmid, pool)) post_data = { 'newid': newvmid, 'name': h, 'description': notes, 'pool': pool } ret = p.cloneVirtualMachine( hosttempl[myimage][0], hosttempl[myimage][1], post_data)['data'] print(' ...' + ret) newhostids.append(newvmid) if yn_choice("Do you want to start the machine(s) now?"): for n in newhostids: print('Starting host %s ..' % n) ret = p.startVirtualMachine( hosttempl[myimage][0], n)['data'] print(' ...' + ret) pingwait(myhosts[0],7) else: prn('Please start the host with "prox start <hostname>"', usegui) print('') def parse_contact(p,node,vmid): found = '' cfg = p.getContainerConfig(node,vmid)['data'] if 'description' in cfg.keys() : m = re.search('technical_contact: (.+?)@', cfg['description']) if m: found = m.group(1) return found def parse_contact_vm(p,node,vmid): found = '' cfg = p.getVirtualConfig(node,vmid)['data'] if 'description' in cfg.keys() : m = re.search('technical_contact: (.+?)@', cfg['description']) if m: found = m.group(1) return found def start_machines(p, ourmachines, vmids, usegui=False): for vmid in vmids: machine = ourmachines[vmid] ret = None sleeptime = 1 if machine[3] == 'running': prn('Machine "%s" is already running!' % machine[1], usegui) continue print('Starting host %s ..' % vmid) if machine[2] == 'kvm': ret = p.startVirtualMachine(machine[4], vmid)['data'] print('...%s' % ret) for i in range(25): time.sleep(sleeptime) ret = p.getVirtualStatus(machine[4], vmid)['data'] print('Machine {0: <4}: {1}, cpu: {2:.0%} '.format( vmid, ret['status'], ret['cpu'])) if ret['cpu'] > 0.2: break else: ret = None for i in range(15): ret = p.startLXCContainer(machine[4], vmid)['data'] if isinstance(ret, str): print(' ...%s' % ret) break time.sleep(sleeptime) sleeptime+=1 print('starting host %s, re-try %s' % (vmid, i)) if not isinstance(ret, str): print("Failed starting host id %s !" 
% vmid) continue sleeptime = 1 for i in range(15): time.sleep(sleeptime) sleeptime+=1 ret = p.getContainerStatus(machine[4], vmid)['data'] if not isinstance(ret, int): prn( 'Machine {0: <4}: {1}, cpu: {2:.0%} '.format( vmid, ret['status'], ret['cpu'])) if ret['status'] == 'running': break else: print(' ...Error %s' % ret) if isinstance(ret, int): prn("Failed starting host id %s !" % vmid) continue def run_chef_knife(host): knife = "knife bootstrap --no-host-key-verify " "--ssh-user root --ssh-identity-file %s/.ssh/id_rsa_prox " "--environment scicomp_prod " "--bootstrap-version %s " '--server-url "https://chef.fhcrc.org/organizations/cit" ' "--run-list 'role[cit-base]','role[scicomp_base]' " "--node-name %s " "%s" % (homedir,CHEFVERSION,host,host) if host == 'hostname': print('you can also execute this knife command manually:') print('************************************') print(knife) print('************************************') else: if os.path.exists('%s/.chef' % homedir): print('*** executing knife command:') print(knife) ret = subprocess.run(knife, shell=True) else: print ('chef/knife config dir %s/.chef does not exist.' % homedir) def run_chef_client(pwd, host): chefclient = "chef-client --environment scicomp_prod " "--validation_key /root/.chef/cit-validator.pem " "--runlist role[cit-base],role[scicomp_base] " print ('\nbootstrapping chef-client configs on %s ... please wait a few minutes ... !!!\n' % host) cmdlist = ['dpkg -i /opt/chef/tmp/chef_amd64.deb', chefclient] ssh_exec('root', pwd, cmdlist, host) def check_ssh_auth(user): if os.path.exists('%s/.ssh/id_rsa_prox' % homedir): return True else: ret = subprocess.run("ssh-keygen -q -t rsa -f %s/.ssh/id_rsa_prox -C prox-%s -N ''" % (homedir, user), shell=True) def check_ssh_agent(): SSH_AUTH_SOCK = os.getenv('SSH_AUTH_SOCK', '') if SSH_AUTH_SOCK == '': print("\nYou don't have ssh-agent running, please execute this command:") if os.path.exists('%s/.ssh/id_rsa' % homedir): print("eval $(ssh-agent -s); ssh-add\n") else: print("eval $(ssh-agent -s)\n") else: if os.path.exists('%s/.ssh/id_rsa_prox' % homedir): ret = subprocess.run("ssh-add %s/.ssh/id_rsa_prox > /dev/null 2>&1" % homedir, shell=True) def runlist_exec(pwd, myhost): prn('***** Executing run list %s on host %s........' % (args.runlist, myhost)) rlist = os.path.expanduser(args.runlist.strip()) if os.path.exists(rlist): with open(rlist) as f: commands = f.read().splitlines() prn('*** Running commands %s' % commands) ssh_exec('root', pwd, commands, myhost) else: ssh_exec('root', pwd, [args.runlist.strip(),], myhost) def install_docker(pwd, myhost): cmd = [] cmd.append('apt-get update') cmd.append('apt-get install -y apt-transport-https ca-certificates curl software-properties-common') cmd.append('apt-get install -y gpg-agent') cmd.append('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -') cmd.append('add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"') cmd.append('apt-get update') cmd.append('apt-get install -y docker-ce') ssh_exec('root', pwd, cmd, myhost)
Apache License 2.0
derfies/panda3d-editor
src/pandaEditor/ui/mainFrame.py
MainFrame.OnFileSave
python
def OnFileSave(self, evt, saveAs=False):
    if self.base.doc.file_path is None or saveAs:
        filePath = self._GetSavePath()
        if filePath:
            self.base.doc.file_path = filePath
        else:
            return
    self.base.doc.save()
Save the document.
https://github.com/derfies/panda3d-editor/blob/a50939bd4bfa5c22d27a9ddee090717e8d95f404/src/pandaEditor/ui/mainFrame.py#L248-L262
import os import sys import wx import wx.aui import wx.propgrid as wxpg from pubsub import pub import panda3d.core as pm import p3d from direct.showbase.PythonUtil import getBase as get_base from wxExtra import utils as wxUtils, ActionItem from wxExtra.logpanel import LogPanel from wxExtra import AuiManagerConfig, CustomAuiToolBar, CustomMenu from pandaEditor import commands as cmds from pandaEditor.constants import MODEL_EXTENSIONS from pandaEditor.ui.viewport import Viewport from pandaEditor.ui.resourcesPanel import ResourcesPanel from pandaEditor.ui.sceneGraphPanel import SceneGraphPanel from pandaEditor.ui.propertiesPanel import PropertiesPanel from pandaEditor.ui.preferenceseditor import PreferencesEditor from pandaEditor.ui.createdialog import CreateDialog FRAME_TITLE = 'Panda Editor 0.1' TBAR_ICON_SIZE = (24, 24) WILDCARD_SCENE = '.xml|*.xml' WILDCARD_P3D = '.p3d|*.p3d' ID_FILE_NEW = wx.NewId() ID_FILE_OPEN = wx.NewId() ID_FILE_SAVE = wx.NewId() ID_FILE_SAVE_AS = wx.NewId() ID_FILE_IMPORT = wx.NewId() ID_FILE_PROJ = wx.NewId() ID_PROJ_NEW = wx.NewId() ID_PROJ_SET = wx.NewId() ID_PROJ_BUILD = wx.NewId() ID_EDIT_UNDO = wx.NewId() ID_EDIT_REDO = wx.NewId() ID_EDIT_GROUP = wx.NewId() ID_EDIT_UNGROUP = wx.NewId() ID_EDIT_PARENT = wx.NewId() ID_EDIT_UNPARENT = wx.NewId() ID_EDIT_DUPLICATE = wx.NewId() ID_EDIT_WRITE_BAM_FILE = wx.NewId() ID_EDIT_EXPORT_OBJ = wx.NewId() ID_MODIFY_PHYSICS = wx.NewId() ID_XFORM_SEL = wx.NewId() ID_XFORM_POS = wx.NewId() ID_XFORM_ROT = wx.NewId() ID_XFORM_SCL = wx.NewId() ID_XFORM_WORLD = wx.NewId() ID_VIEW_GRID = wx.NewId() ID_VIEW_TOP = wx.NewId() ID_VIEW_BOTTOM = wx.NewId() ID_VIEW_FRONT = wx.NewId() ID_VIEW_BACK = wx.NewId() ID_VIEW_RIGHT = wx.NewId() ID_VIEW_LEFT = wx.NewId() ID_CREATE_PREFAB = wx.NewId() ID_LAYOUT_GAME = wx.NewId() ID_LAYOUT_EDITOR = wx.NewId() ID_LAYOUT_BOTH = wx.NewId() ID_WIND_PANEL = wx.NewId() ID_WIND_FILE_TOOLBAR = wx.NewId() ID_WIND_EDIT_TOOLBAR = wx.NewId() ID_WIND_MODIFY_TOOLBAR = wx.NewId() ID_WIND_XFORM_TOOLBAR = wx.NewId() ID_WIND_LAYOUT_TOOLBAR = wx.NewId() ID_WIND_VIEWPORT = wx.NewId() ID_WIND_SCENE_GRAPH = wx.NewId() ID_WIND_LIGHT_LINKER = wx.NewId() ID_WIND_PROPERTIES = wx.NewId() ID_WIND_RESOURCES = wx.NewId() ID_WIND_LOG = wx.NewId() ID_WIND_PREFERENCES = wx.NewId() ID_PLAY = wx.NewId() ID_PAUSE = wx.NewId() class MainFrame(wx.Frame): def __init__(self, base, *args, **kwargs): super().__init__(*args, **kwargs) self.base = base self.preMaxPos = None self.preMaxSize = None self.Bind(wx.EVT_CLOSE, self.OnClose) self.Bind(wx.EVT_KEY_UP, p3d.wxPanda.OnKeyUp) self.Bind(wx.EVT_KEY_DOWN, p3d.wxPanda.OnKeyDown) self.Bind(wx.EVT_SIZE, self.OnSize) self.Bind(wx.EVT_MOVE, self.OnMove) pub.subscribe(self.OnUpdate, 'Update') self.cfg = wx.Config('pandaEditor') self.BuildFileActions() self.BuildEditActions() self.BuildModifyActions() self.BuildXformActions() self.BuildLayoutActions() self.pnlViewport = Viewport(self.base, self) self.pnlSceneGraph = SceneGraphPanel(self) self.pnlProps = PropertiesPanel(self) self.pnlRsrcs = ResourcesPanel(self) self.pnlLog = LogPanel(self) self.BuildAuiManager() self.mb = wx.MenuBar() self.BuildViewMenu() self.BuildCreateMenu() self.BuildWindowMenu() self.BuildMenuBar() self.RebuildPanelMenu() self.OnUpdateWindowMenu(None) def _GetSavePath(self): defaultDir = '' defaultFile = '' if self.base.doc.file_path is not None: defaultDir, defaultFile = os.path.split(self.base.doc.file_path) elif self.base.project.path is not None: defaultDir = self.base.project.GetScenesDirectory() filePath = 
wxUtils.file_save_dialog('Save Scene As', WILDCARD_SCENE, defaultDir=defaultDir, defaultFile=defaultFile) if filePath and os.path.exists(filePath): msg = ''.join(['The file "', filePath, '" already exists.\nDo you want to replace it?']) if wxUtils.YesNoDialog(msg, 'Replace File?', wx.ICON_WARNING) == wx.ID_NO: return False return filePath def _CheckForSave(self): if self.base.doc.dirty: msg = ''.join(['The document "', self.base.doc.title, '" was modified after last save.\nSave changes before continuing?']) result = wxUtils.YesNoCancelDialog(msg, 'Save Changes?', wx.ICON_WARNING) if result == wx.ID_YES: self.OnFileSave(None) elif result == wx.ID_CANCEL: return False return True def OnClose(self, evt): if not self._CheckForSave(): evt.Veto() return self.auiCfg.Save() if self.preMaxPos is not None: self.auiCfg.SavePosition(*self.preMaxPos) if self.preMaxSize is not None: self.auiCfg.SaveSize(*self.preMaxSize) if self.base.project.path is not None: self.cfg.Write('projDirPath', self.base.project.path) self.Show(False) try: base except NameError: sys.exit() base.userExit() def OnFileNew(self, evt): if not self._CheckForSave(): return self.base.CreateScene() self.base.doc.on_refresh() def OnFileOpen(self, evt, filePath=None): if not self._CheckForSave(): return if filePath is None: scnsDirPath = self.base.project.GetScenesDirectory() if scnsDirPath is None: scnsDirPath = os.getcwd() filePath = wxUtils.file_open_dialog('Open Scene', WILDCARD_SCENE, defaultDir=scnsDirPath) if filePath: self.base.CreateScene(filePath) self.base.doc.load()
MIT License
obi-wan3/ob13-cogs
mentionhelp/mentionhelp.py
MentionHelp._mention_help
python
async def _mention_help(self, ctx: commands.Context):
Send a message when a user mentions the bot (with no other text).
https://github.com/obi-wan3/ob13-cogs/blob/716527f8581e0345802ea2626d43324f87edf941/mentionhelp/mentionhelp.py#L79-L80
import re import discord from redbot.core import commands, Config class MentionHelp(commands.Cog): def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self, 14000605, force_registration=True) default_guild = { "toggle": True } default_global = { "toggle": True, "message": None, "embed": False } self.config.register_guild(**default_guild) self.config.register_global(**default_global) @commands.Cog.listener("on_message_without_command") async def _message_listener(self, message: discord.Message): if ( message.author.bot or not await self.config.toggle() ): return if message.guild and ( await self.bot.cog_disabled_in_guild(self, message.guild) or not await self.config.guild(message.guild).toggle() ): return mention = re.compile(rf"<@!?{self.bot.user.id}>") destination = message.channel if message.guild else message.author if message.guild and not destination.permissions_for(message.guild.me).send_messages: return to_send = await self.config.message() if mention.fullmatch(message.content.strip()) and self.bot.user.id in [u.id for u in message.mentions] and to_send: if (await self.config.embed()) and ((not message.guild) or destination.permissions_for(message.guild.me).embed_links): return await destination.send(embed=discord.Embed(description=to_send, color=await self.bot.get_embed_color(destination))) return await destination.send(to_send) @commands.group(name="mentionhelp")
MIT License
medtagger/medtagger
backend/medtagger/repositories/label_tags.py
enable
python
def enable(label_tag_key: str) -> None:
    enabling_query = LabelTag.query.filter(LabelTag.key == label_tag_key)
    updated = enabling_query.update({'disabled': False}, synchronize_session='fetch')
    if not updated:
        raise InternalErrorException(f'Label Tag "{label_tag_key}" was not enabled due to unknown database error.')
Enable existing Label Tag.
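A short usage sketch (the tag key is hypothetical; it assumes the repository module is importable under the path shown in the record):

from medtagger.repositories import label_tags

# re-enable a previously disabled Label Tag; raises InternalErrorException
# if no row with this key was updated
label_tags.enable('LEFT_KIDNEY')

# the complementary call from the same module
label_tags.disable('LEFT_KIDNEY')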
https://github.com/medtagger/medtagger/blob/8b7575e55764a95d2040f3b9bcd23b6ff846ecaa/backend/medtagger/repositories/label_tags.py#L75-L80
from typing import List from medtagger.database import db_transaction_session from medtagger.database.models import LabelTag from medtagger.definitions import LabelTool from medtagger.exceptions import InternalErrorException from medtagger.types import TaskID def get_all_tags(include_disabled: bool = False) -> List[LabelTag]: query = LabelTag.query if not include_disabled: query = query.filter(~LabelTag.disabled) return query.order_by(LabelTag.key).all() def get_label_tag_by_key(label_tag_key: str) -> LabelTag: return LabelTag.query.filter(LabelTag.key == label_tag_key).one() def add_new_tag(key: str, name: str, tools: List[LabelTool], task_id: TaskID) -> LabelTag: label_tag = LabelTag(key, name, tools) label_tag.task_id = task_id with db_transaction_session() as session: session.add(label_tag) return label_tag def delete_tag_by_key(key: str) -> None: with db_transaction_session() as session: session.query(LabelTag).filter(LabelTag.key == key).delete() def update(key: str, name: str = None, tools: List[LabelTool] = None, task_id: TaskID = None) -> LabelTag: label_tag = get_label_tag_by_key(key) if name: label_tag.name = name if tools: label_tag.tools = tools if task_id: label_tag.task_id = task_id with db_transaction_session() as session: session.add(label_tag) return label_tag def disable(label_tag_key: str) -> None: disabling_query = LabelTag.query.filter(LabelTag.key == label_tag_key) updated = disabling_query.update({'disabled': True}, synchronize_session='fetch') if not updated: raise InternalErrorException(f'Label Tag "{label_tag_key}" was not disabled due to unknown database error.')
Apache License 2.0
linmx0130/ya_mxdet
train_faster_rcnn.py
train_dataset
python
def train_dataset():
    train_dataset = VOCDataset(annotation_dir=cfg.annotation_dir,
                               img_dir=cfg.img_dir,
                               dataset_index=cfg.dataset_index,
                               transform=train_transformation,
                               resize_func=img_resize)
    return train_dataset
Prepare a custom dataset.
return: train_dataset
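A minimal usage sketch (assumptions: VOCDataset implements the usual __len__/__getitem__ protocol, returns an (image, label) pair per index, and the paths in faster_rcnn.config.cfg point at a local Pascal VOC copy):

dataset = train_dataset()
print(len(dataset))       # number of annotated images
img, label = dataset[0]   # transformed image and its ground-truth boxes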
https://github.com/linmx0130/ya_mxdet/blob/eaa6de7faf819f3720d8dac64c57a42dec38eed7/train_faster_rcnn.py#L37-L47
from faster_rcnn.config import cfg from VOCDataset import VOCDataset from faster_rcnn.faster_rcnn import FasterRCNN import mxnet as mx from faster_rcnn.utils import random_flip, imagenetNormalize, img_resize, random_square_crop, select_class_generator, bbox_inverse_transform, softmax_celoss_with_ignore from faster_rcnn.rpn_gt_opr import rpn_gt_opr from faster_rcnn.rpn_proposal import proposal_train import os import argparse import logging import time def logging_system(): global args logger = logging.getLogger("training") logger.setLevel(logging.INFO) fh = logging.FileHandler(os.path.join(args.save_path, args.logger), 'w') formatter = logging.Formatter( '[%(asctime)s - %(name)s - %(filename)s:%(lineno)d - %(levelname)s] %(message)s' ) fh.setFormatter(formatter) logger.addHandler(fh) ch = logging.StreamHandler() logger.addHandler(ch) return logger def train_transformation(data, label): data, label = random_flip(data, label) data = imagenetNormalize(data) return data, label
MIT License
usc-isi-i2/rltk
rltk/record.py
remove_raw_object
python
def remove_raw_object(cls):
    cls._remove_raw_object = True
    return cls
Decorator for Record class. If a Record class is decorated, raw_object will be removed once all mark properties are cached.
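An illustrative sketch with a hypothetical Person record; only the decorator and helpers from rltk/record.py are taken from the repository, the class and its fields are made up:

from rltk.record import Record, cached_property, remove_raw_object

@remove_raw_object
class Person(Record):
    @cached_property
    def id(self):
        return self.raw_object['id']

    @cached_property
    def name(self):
        return self.raw_object.get('name', '')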
https://github.com/usc-isi-i2/rltk/blob/aee10ed5dd561583e60db3373ed82fe1208da1e9/rltk/record.py#L75-L81
import re from typing import Callable re_record_id = re.compile(r'^[^*]{1,255}$') re_valid_property_name = re.compile(r'^[A-Za-z_]{1}[\w]*$') class Record(object): _remove_raw_object = False def __init__(self, raw_object): self.raw_object = raw_object @property def id(self): raise NotImplementedError def __eq__(self, other): if not isinstance(other, self.__class__): return False return self.id == other.id class cached_property(property): def __init__(self, func): self.func = func def __get__(self, obj, cls): if obj is None: return self cached_name = self.func.__name__ if cached_name not in obj.__dict__: obj.__dict__[cached_name] = self.func(obj) value = obj.__dict__.get(cached_name) return value def __reduce__(self): return cached_property.__new__, (cached_property,), {'func': self.func}
MIT License
google-research/long-range-arena
lra_benchmarks/models/reformer/reformer.py
ReformerDualEncoder.apply
python
def apply(self,
          inputs1,
          inputs2,
          vocab_size=None,
          inputs1_positions=None,
          inputs2_positions=None,
          inputs1_segmentation=None,
          inputs2_segmentation=None,
          use_bfloat16=False,
          emb_dim=512,
          num_heads=8,
          num_layers=6,
          qkv_dim=512,
          mlp_dim=2048,
          max_len=2048,
          train=False,
          dropout_rate=0.1,
          attention_dropout_rate=0.1,
          classifier=True,
          classifier_pool='CLS',
          num_classes=2,
          interaction=None):
    encoder = ReformerEncoder.shared(
        inputs_positions=inputs1_positions,
        inputs_segmentation=inputs1_segmentation,
        vocab_size=vocab_size,
        use_bfloat16=use_bfloat16,
        emb_dim=emb_dim,
        num_heads=num_heads,
        num_layers=num_layers,
        qkv_dim=qkv_dim,
        mlp_dim=mlp_dim,
        max_len=max_len,
        train=train,
        dropout_rate=dropout_rate,
        attention_dropout_rate=attention_dropout_rate,
        name='encoder')
    inputs1_encoded = encoder(inputs1)
    inputs2_encoded = encoder(inputs2)
    encoded = common_layers.classifier_head_dual(
        inputs1_encoded,
        inputs2_encoded,
        num_classes,
        mlp_dim,
        pooling_mode=classifier_pool,
        interaction=interaction)
    return encoded
Applies Transformer model on text similarity.

A deliberate choice to distinguish this from NLI because we may want to do
different things to the model later. Dual Encoding mode enforces that we do
not do cross attention between pairs.

Args:
  inputs1: input data.
  inputs2: target data.
  vocab_size: size of the input vocabulary.
  inputs1_positions: input subsequence positions for packed examples.
  inputs2_positions: target subsequence positions for packed examples.
  inputs1_segmentation: input segmentation info for packed examples.
  inputs2_segmentation: target segmentation info for packed examples.
  use_bfloat16: bool: whether use bfloat16.
  emb_dim: dimension of embedding.
  num_heads: number of heads.
  num_layers: number of layers.
  qkv_dim: dimension of the query/key/value.
  mlp_dim: dimension of the mlp on top of attention block.
  max_len: maximum length.
  train: whether it is training.
  dropout_rate: dropout rate.
  attention_dropout_rate: dropout rate for attention weights.
  classifier: boolean, to use classifier.
  classifier_pool: str, supports "MEAN", "MAX" pooling.
  num_classes: int, number of classification classes.
  interaction: str

Returns:
  output of a transformer decoder.
https://github.com/google-research/long-range-arena/blob/09c2916c3f33a07347dcc70c8839957d3c9d4062/lra_benchmarks/models/reformer/reformer.py#L204-L284
from flax import nn import jax.numpy as jnp from lra_benchmarks.models.layers import common_layers from lra_benchmarks.models.reformer import reformer_attention class ReformerBlock(nn.Module): def apply(self, inputs, qkv_dim, mlp_dim, num_heads, dtype=jnp.float32, causal_mask=False, inputs_segmentation=None, padding_mask=None, dropout_rate=0.1, attention_dropout_rate=0.1, deterministic=False, cache=None): assert inputs.ndim == 3 x = nn.LayerNorm(inputs) x = reformer_attention.ReformerSelfAttention( x, num_heads=num_heads, qkv_features=qkv_dim, causal_mask=causal_mask, padding_mask=padding_mask, kernel_init=nn.initializers.xavier_uniform(), bias_init=nn.initializers.normal(stddev=1e-6), bias=False, broadcast_dropout=False, dropout_rate=attention_dropout_rate, deterministic=deterministic, cache=cache) x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic) x = x + inputs y = nn.LayerNorm(x) y = common_layers.MlpBlock( y, mlp_dim=mlp_dim, dropout_rate=dropout_rate, deterministic=deterministic) return x + y class ReformerEncoder(nn.Module): def apply(self, inputs, vocab_size, inputs_positions=None, inputs_segmentation=None, shared_embedding=None, use_bfloat16=False, emb_dim=512, num_heads=8, dtype=jnp.float32, num_layers=6, qkv_dim=512, mlp_dim=2048, max_len=512, train=True, dropout_rate=0.1, attention_dropout_rate=0.1, learn_pos_emb=False, classifier=False, classifier_pool='CLS', num_classes=10): assert inputs.ndim == 2 src_padding_mask = (inputs > 0)[..., None] if shared_embedding is None: input_embed = nn.Embed.partial( num_embeddings=vocab_size, features=emb_dim, embedding_init=nn.initializers.normal(stddev=1.0)) else: input_embed = shared_embedding x = inputs.astype('int32') x = input_embed(x) if classifier and classifier_pool == 'CLS': cls = self.param('cls', (1, 1, emb_dim), nn.initializers.zeros) cls = jnp.tile(cls, [x.shape[0], 1, 1]) x = jnp.concatenate([cls, x], axis=1) max_len += 1 src_padding_mask = jnp.concatenate( [src_padding_mask[:, :1], src_padding_mask], axis=1) pe_init = nn.initializers.normal(stddev=0.02) if learn_pos_emb else None x = common_layers.AddPositionEmbs( x, inputs_positions=inputs_positions, posemb_init=pe_init, max_len=max_len, name='posembed_input') x = nn.dropout(x, rate=dropout_rate, deterministic=not train) if use_bfloat16: x = x.astype(jnp.bfloat16) dtype = jnp.bfloat16 else: dtype = jnp.float32 for lyr in range(num_layers): x = ReformerBlock( x, qkv_dim=qkv_dim, mlp_dim=mlp_dim, num_heads=num_heads, dtype=dtype, padding_mask=src_padding_mask, inputs_segmentation=inputs_segmentation, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, deterministic=not train, name=f'encoderblock_{lyr}') encoded = nn.LayerNorm(x, dtype=dtype, name='encoder_norm') if classifier: encoded = common_layers.classifier_head( encoded, num_classes, mlp_dim, pooling_mode=classifier_pool) return encoded class ReformerDualEncoder(nn.Module):
Apache License 2.0
beartype/beartype
beartype/_decor/_code/_pep/pepcode.py
_unmemoize_pep_code
python
def _unmemoize_pep_code(
    data: BeartypeData,
    func_wrapper_code: str,
    pith_repr: str,
    hint_forwardrefs_class_basename: tuple,
) -> str:
    assert data.__class__ is BeartypeData, f'{repr(data)} not @beartype data.'
    assert isinstance(func_wrapper_code, str), (
        f'{repr(func_wrapper_code)} not string.')
    assert isinstance(pith_repr, str), f'{repr(pith_repr)} not string.'
    assert isinstance(hint_forwardrefs_class_basename, Iterable), (
        f'{repr(hint_forwardrefs_class_basename)} not iterable.')

    func_wrapper_code = replace_str_substrs(
        text=func_wrapper_code,
        old=PEP_CODE_PITH_ROOT_PARAM_NAME_PLACEHOLDER,
        new=pith_repr,
    )

    if hint_forwardrefs_class_basename:
        data.func_wrapper_locals[ARG_NAME_TYPISTRY] = bear_typistry

        for hint_forwardref_class_basename in hint_forwardrefs_class_basename:
            func_wrapper_code = replace_str_substrs(
                text=func_wrapper_code,
                old=(
                    f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_PREFIX}'
                    f'{hint_forwardref_class_basename}'
                    f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_SUFFIX}'
                ),
                new=register_typistry_forwardref(
                    get_hint_pep484585_forwardref_classname_relative_to_object(
                        hint=hint_forwardref_class_basename, obj=data.func)
                ),
            )

    return func_wrapper_code
Convert the passed memoized code snippet type-checking any parameter or
return of the decorated callable into a memoized code snippet type-checking
a specific parameter or return of that callable.

Specifically, this function (in order):

#. Globally replaces all references to the
   :data:`PEP_CODE_PITH_ROOT_PARAM_NAME_PLACEHOLDER` placeholder substring
   cached into this code with the passed ``pith_repr`` parameter.
#. Unmemoizes this code by globally replacing all relative forward reference
   placeholder substrings cached into this code with Python expressions
   evaluating to the classes referred to by those substrings relative to
   that callable when accessed via the private ``__beartypistry`` parameter.

Parameters
----------
data : BeartypeData
    Decorated callable to be type-checked.
func_wrapper_code : str
    Memoized callable-agnostic code snippet type-checking any parameter or
    return of the decorated callable.
pith_repr : str
    Machine-readable representation of the name of this parameter or return.
hint_forwardrefs_class_basename : tuple
    Tuple of the unqualified classnames referred to by all relative forward
    reference type hints visitable from the current root type hint.

Returns
----------
str
    This memoized code unmemoized by globally resolving all relative forward
    reference placeholder substrings cached into this code relative to the
    currently decorated callable.
https://github.com/beartype/beartype/blob/9da0bbebe408d281d5bfb6cc203dc6969e241aa4/beartype/_decor/_code/_pep/pepcode.py#L237-L331
from beartype.roar import BeartypeDecorHintPepException from beartype._decor._cache.cachetype import ( bear_typistry, register_typistry_forwardref, ) from beartype._decor._code.codesnip import ARG_NAME_TYPISTRY from beartype._decor._code._pep._pephint import pep_code_check_hint from beartype._decor._code._pep._pepsnip import ( PARAM_KIND_TO_PEP_CODE_LOCALIZE, PEP_CODE_CHECK_RETURN_PREFIX, PEP_CODE_CHECK_RETURN_SUFFIX, PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_PREFIX, PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_SUFFIX, PEP_CODE_PITH_ROOT_PARAM_NAME_PLACEHOLDER, ) from beartype._decor._data import BeartypeData from beartype._util.cache.utilcacheerror import reraise_exception_cached from beartype._util.hint.pep.proposal.pep484585.utilpep484585ref import ( get_hint_pep484585_forwardref_classname_relative_to_object) from beartype._util.kind.utilkinddict import update_mapping from beartype._util.text.utiltextlabel import ( prefix_callable_decorated_param, prefix_callable_decorated_return, ) from beartype._util.text.utiltextmunge import replace_str_substrs from collections.abc import Iterable from inspect import Parameter __all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL'] _RETURN_REPR = repr('return') def pep_code_check_param( data: BeartypeData, hint: object, param: Parameter, param_index: int, ) -> str: assert data.__class__ is BeartypeData, f'{repr(data)} not @beartype data.' assert isinstance(param, Parameter), ( f'{repr(param)} not parameter metadata.') assert isinstance(param_index, int), ( f'{repr(param_index)} not integer.') PARAM_LOCALIZE_TEMPLATE = PARAM_KIND_TO_PEP_CODE_LOCALIZE.get( param.kind, None) if PARAM_LOCALIZE_TEMPLATE is None: exception_prefix = prefix_callable_decorated_param( func=data.func, param_name=param.name) raise BeartypeDecorHintPepException( f'{exception_prefix}kind {repr(param.kind)} ' f'currently unsupported by @beartype.' ) try: ( code_param_check_pith, func_wrapper_locals, hint_forwardrefs_class_basename, ) = pep_code_check_hint(hint) update_mapping(data.func_wrapper_locals, func_wrapper_locals) code_param_check = _unmemoize_pep_code( data=data, func_wrapper_code=code_param_check_pith, pith_repr=repr(param.name), hint_forwardrefs_class_basename=hint_forwardrefs_class_basename, ) except Exception as exception: reraise_exception_cached( exception=exception, target_str=prefix_callable_decorated_param( func=data.func, param_name=param.name), ) code_param_localize = PARAM_LOCALIZE_TEMPLATE.format( arg_name=param.name, arg_index=param_index) return f'{code_param_localize}{code_param_check}' def pep_code_check_return(data: BeartypeData, hint: object) -> str: assert data.__class__ is BeartypeData, f'{repr(data)} not @beartype data.' hint_forwardrefs_class_basename = () try: ( code_return_check_pith, func_wrapper_locals, hint_forwardrefs_class_basename, ) = pep_code_check_hint(hint) update_mapping(data.func_wrapper_locals, func_wrapper_locals) code_return_check_prefix = PEP_CODE_CHECK_RETURN_PREFIX.format( func_call_prefix=data.func_wrapper_code_call_prefix) code_return_check_memoized = ( f'{code_return_check_prefix}' f'{code_return_check_pith}' f'{PEP_CODE_CHECK_RETURN_SUFFIX}' ) code_return_check = _unmemoize_pep_code( data=data, func_wrapper_code=code_return_check_memoized, pith_repr=_RETURN_REPR, hint_forwardrefs_class_basename=hint_forwardrefs_class_basename, ) except Exception as exception: reraise_exception_cached( exception=exception, target_str=prefix_callable_decorated_return(data.func), ) return code_return_check
MIT License
visualcomputinginstitute/3d-semantic-segmentation
tools/lazy_decorator.py
lazy_property
python
def lazy_property(function):
    attribute = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)

    return decorator
caches the output of the property and just returns the value for next calls

:param function: property to be cached
:return: cached output of property
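A self-contained sketch with a hypothetical class, assuming lazy_property is importable from tools.lazy_decorator:

from tools.lazy_decorator import lazy_property

class Dataset:
    @lazy_property
    def statistics(self):
        print('computing...')            # runs only on the first access
        return {'mean': 0.0, 'std': 1.0}

d = Dataset()
d.statistics   # prints 'computing...' and caches the dict in d._cache_statistics
d.statistics   # served from the cache, no recomputation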
https://github.com/visualcomputinginstitute/3d-semantic-segmentation/blob/1dfc010b370a346902ad29460c9ad969c1892a97/tools/lazy_decorator.py#L10-L25
import functools
MIT License
nuagenetworks/vspk-python
vspk/v5_0/nuvirtualip.py
NUVirtualIP.associated_floating_ip_id
python
def associated_floating_ip_id(self):
    return self._associated_floating_ip_id
Get associated_floating_ip_id value.

Notes:
    Id of Floating IP address associated to this virtual ip

    This attribute is named `associatedFloatingIPID` in VSD API.
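A minimal sketch of reading the property; the import assumes the class is re-exported by the vspk.v5_0 package, and fetching a populated object would require an authenticated VSD session, which is omitted here:

from vspk.v5_0 import NUVirtualIP

vip = NUVirtualIP()
print(vip.associated_floating_ip_id)  # None until the object is fetched or the value is set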
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v5_0/nuvirtualip.py#L253-L263
from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUEventLogsFetcher from bambou import NURESTObject class NUVirtualIP(NURESTObject): __rest_name__ = "virtualip" __resource_name__ = "virtualips" CONST_IP_TYPE_IPV6 = "IPV6" CONST_IP_TYPE_IPV4 = "IPV4" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): super(NUVirtualIP, self).__init__() self._mac = None self._ip_type = None self._last_updated_by = None self._virtual_ip = None self._entity_scope = None self._associated_floating_ip_id = None self._subnet_id = None self._external_id = None self.expose_attribute(local_name="mac", remote_name="MAC", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'IPV4', u'IPV6']) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="virtual_ip", remote_name="virtualIP", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="associated_floating_ip_id", remote_name="associatedFloatingIPID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="subnet_id", remote_name="subnetID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) @property def mac(self): return self._mac @mac.setter def mac(self, value): self._mac = value @property def ip_type(self): return self._ip_type @ip_type.setter def ip_type(self, value): self._ip_type = value @property def last_updated_by(self): return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): self._last_updated_by = value @property def virtual_ip(self): return self._virtual_ip @virtual_ip.setter def virtual_ip(self, value): self._virtual_ip = value @property def entity_scope(self): return self._entity_scope @entity_scope.setter def entity_scope(self, value): self._entity_scope = value @property
BSD 3-Clause New or Revised License
v7labs/darwin-py
darwin/dataset/remote_dataset.py
RemoteDataset.push
python
def push(
    self,
    files_to_upload: Optional[List[Union[PathLike, LocalFile]]],
    *,
    blocking: bool = True,
    multi_threaded: bool = True,
    fps: int = 0,
    as_frames: bool = False,
    files_to_exclude: Optional[List[PathLike]] = None,
    path: Optional[str] = None,
    preserve_folders: bool = False,
    progress_callback: Optional[ProgressCallback] = None,
    file_upload_callback: Optional[FileUploadCallback] = None,
):
    if files_to_exclude is None:
        files_to_exclude = []

    if files_to_upload is None:
        raise ValueError("No files or directory specified.")

    uploading_files = [item for item in files_to_upload if isinstance(item, LocalFile)]
    search_files = [item for item in files_to_upload if not isinstance(item, LocalFile)]

    generic_parameters_specified = path is not None or fps != 0 or as_frames is not False
    if uploading_files and generic_parameters_specified:
        raise ValueError("Cannot specify a path when uploading a LocalFile object.")

    for found_file in find_files(search_files, files_to_exclude=files_to_exclude):
        local_path = path
        if preserve_folders:
            source_files = [source_file for source_file in search_files if is_relative_to(found_file, source_file)]
            if source_files:
                local_path = str(found_file.relative_to(source_files[0]).parent)
        uploading_files.append(LocalFile(found_file, fps=fps, as_frames=as_frames, path=local_path))

    if not uploading_files:
        raise ValueError("No files to upload, check your path, exclusion filters and resume flag")

    handler = UploadHandler(self, uploading_files)
    if blocking:
        handler.upload(
            multi_threaded=multi_threaded,
            progress_callback=progress_callback,
            file_upload_callback=file_upload_callback,
        )
    else:
        handler.prepare_upload()

    return handler
Uploads a local dataset (images ONLY) in the datasets directory.

Parameters
----------
files_to_upload : Optional[List[Union[PathLike, LocalFile]]]
    List of files to upload. Those can be folders.
blocking : bool
    If False, the dataset is not uploaded and a generator function is returned instead.
multi_threaded : bool
    Uses multiprocessing to upload the dataset in parallel. If blocking is False this has no effect.
files_to_exclude : Optional[List[PathLike]]
    Optional list of files to exclude from the file scan. Those can be folders.
fps : int
    When the uploading file is a video, specify its framerate.
as_frames : bool
    When the uploading file is a video, specify whether it's going to be uploaded as a list of frames.
path : Optional[str]
    Optional path to store the files in.
preserve_folders : bool
    Specify whether or not to preserve folder paths when uploading.
progress_callback : Optional[ProgressCallback]
    Optional callback, called every time the progress of an uploading file is reported.
file_upload_callback : Optional[FileUploadCallback]
    Optional callback, called every time a file chunk is uploaded.

Returns
-------
handler : UploadHandler
    Class for handling uploads, progress and error messages.
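A usage sketch: `dataset` stands for an already-obtained RemoteDataset (client construction and authentication are omitted) and the paths are placeholders.

handler = dataset.push(["./images"], preserve_folders=True)    # blocks until the upload finishes
handler = dataset.push(["./clip.mp4"], fps=2, as_frames=True)   # video uploaded as frames at 2 fps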
https://github.com/v7labs/darwin-py/blob/694253ec520ec32d791eb4a2d0b8acc9ad686b33/darwin/dataset/remote_dataset.py#L88-L168
import json import shutil import tempfile import zipfile from datetime import datetime from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Optional, Union from urllib import parse from darwin.dataset.download_manager import download_all_images_from_annotations from darwin.dataset.identifier import DatasetIdentifier from darwin.dataset.release import Release from darwin.dataset.split_manager import split_dataset from darwin.dataset.upload_manager import ( FileUploadCallback, LocalFile, ProgressCallback, UploadHandler, ) from darwin.dataset.utils import ( exhaust_generator, get_annotations, get_classes, is_relative_to, is_unix_like_os, make_class_lists, sanitize_filename, ) from darwin.datatypes import AnnotationClass from darwin.exceptions import NotFound, UnsupportedExportFormat from darwin.exporter.formats.darwin import build_image_annotation from darwin.item import DatasetItem, parse_dataset_item from darwin.item_sorter import ItemSorter from darwin.types import PathLike from darwin.utils import find_files, parse_darwin_json, split_video_annotation, urljoin from darwin.validators import name_taken, validation_error from rich.console import Console if TYPE_CHECKING: from darwin.client import Client class RemoteDataset: def __init__( self, *, client: "Client", team: str, name: str, slug: str, dataset_id: int, image_count: int = 0, progress: float = 0, ): self.team = team self.name = name self.slug = slug or name self.dataset_id = dataset_id self.image_count = image_count self.progress = progress self.client = client self.annotation_types = None self.console: Console = Console()
MIT License
prajdabre/yanmtt
transformers/src/transformers/models/t5/modeling_tf_t5.py
TFT5Attention.compute_bias
python
def compute_bias(self, query_length, key_length):
    context_position = tf.range(query_length)[:, None]
    memory_position = tf.range(key_length)[None, :]
    relative_position = memory_position - context_position
    relative_position_bucket = self._relative_position_bucket(
        relative_position,
        bidirectional=(not self.is_decoder),
        num_buckets=self.relative_attention_num_buckets,
    )
    values = self.relative_attention_bias(relative_position_bucket)
    values = tf.expand_dims(
        tf.transpose(values, [2, 0, 1]), axis=0
    )
    return values
Compute binned relative position bias
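A toy sketch of the bucketing that feeds this bias, calling the static helper defined on the same class; the bucket counts below are illustrative, not tied to a specific checkpoint:

import tensorflow as tf

context_position = tf.range(4)[:, None]
memory_position = tf.range(4)[None, :]
relative_position = memory_position - context_position
buckets = TFT5Attention._relative_position_bucket(
    relative_position, bidirectional=True, num_buckets=32, max_distance=128)
print(buckets.shape)  # (4, 4) bucket ids, later looked up in relative_attention_bias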
https://github.com/prajdabre/yanmtt/blob/4d329c3bcb81ca432d5947bb4673897086ee7f32/transformers/src/transformers/models/t5/modeling_tf_t5.py#L226-L240
import copy import itertools import math import warnings from typing import Tuple import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPast, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFPreTrainedModel, TFSharedEmbeddings, TFWrappedEmbeddings, input_processing, keras_serializable, shape_list, ) from ...utils import logging from .configuration_t5 import T5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", ] class TFT5LayerNorm(tf.keras.layers.Layer): def __init__(self, epsilon=1e-6, **kwargs): super().__init__(**kwargs) self.variance_epsilon = epsilon def build(self, input_shape): self.weight = self.add_weight("weight", shape=(input_shape[-1],), initializer="ones") super().build(input_shape) def call(self, hidden_states): variance = tf.math.reduce_mean(tf.math.square(hidden_states), axis=-1, keepdims=True) hidden_states = hidden_states * tf.math.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states class TFT5DenseReluDense(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.wi = tf.keras.layers.Dense(config.d_ff, use_bias=False, name="wi") self.wo = tf.keras.layers.Dense(config.d_model, use_bias=False, name="wo") self.dropout = tf.keras.layers.Dropout(config.dropout_rate) self.act = tf.keras.activations.relu def call(self, hidden_states, training=False): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.wo(hidden_states) return hidden_states class TFT5GatedGeluDense(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.wi_0 = tf.keras.layers.Dense(config.d_ff, use_bias=False, name="wi_0") self.wi_1 = tf.keras.layers.Dense(config.d_ff, use_bias=False, name="wi_1") self.wo = tf.keras.layers.Dense(config.d_model, use_bias=False, name="wo") self.dropout = tf.keras.layers.Dropout(config.dropout_rate) self.act = get_tf_activation("gelu_new") def call(self, hidden_states, training=False): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.wo(hidden_states) return hidden_states class TFT5LayerFF(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.feed_forward_proj == "relu": self.DenseReluDense = TFT5DenseReluDense(config, name="DenseReluDense") elif config.feed_forward_proj == "gated-gelu": self.DenseReluDense = TFT5GatedGeluDense(config, name="DenseReluDense") else: raise ValueError( f"{self.config.feed_forward_proj} is not supported. 
Choose between `relu` and `gated-gelu`" ) self.layer_norm = TFT5LayerNorm(epsilon=config.layer_norm_epsilon, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout_rate) def call(self, hidden_states, training=False): normed_hidden_states = self.layer_norm(hidden_states) dense_output = self.DenseReluDense(normed_hidden_states, training=training) hidden_states = hidden_states + self.dropout(dense_output, training=training) return hidden_states class TFT5Attention(tf.keras.layers.Layer): NEW_ID = itertools.count() def __init__(self, config, has_relative_attention_bias=False, **kwargs): super().__init__(**kwargs) self.layer_id = next(TFT5Attention.NEW_ID) self.is_decoder = config.is_decoder self.use_cache = config.use_cache self.has_relative_attention_bias = has_relative_attention_bias self.output_attentions = config.output_attentions self.relative_attention_num_buckets = config.relative_attention_num_buckets self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.inner_dim = self.n_heads * self.key_value_proj_dim self.q = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="q") self.k = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="k") self.v = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="v") self.o = tf.keras.layers.Dense(self.d_model, use_bias=False, name="o") self.dropout = tf.keras.layers.Dropout(config.dropout_rate) if self.has_relative_attention_bias: self.relative_attention_bias = tf.keras.layers.Embedding( self.relative_attention_num_buckets, self.n_heads, name="relative_attention_bias", ) self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += tf.dtypes.cast(tf.math.greater(relative_position, 0), tf.int32) * num_buckets relative_position = tf.math.abs(relative_position) else: relative_position = -tf.math.minimum(relative_position, 0) max_exact = num_buckets // 2 is_small = tf.math.less(relative_position, max_exact) relative_position_if_large = max_exact + tf.dtypes.cast( tf.math.log(tf.dtypes.cast(relative_position, tf.float32) / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact), tf.int32, ) relative_position_if_large = tf.math.minimum(relative_position_if_large, num_buckets - 1) relative_buckets += tf.where(is_small, relative_position, relative_position_if_large) return relative_buckets
MIT License
asteroid-team/asteroid
asteroid/dsp/overlap_add.py
LambdaOverlapAdd.ola_forward
python
def ola_forward(self, x):
    assert x.ndim == 3

    batch, channels, n_frames = x.size()
    unfolded = torch.nn.functional.unfold(
        x.unsqueeze(-1),
        kernel_size=(self.window_size, 1),
        padding=(self.window_size, 0),
        stride=(self.hop_size, 1),
    )

    out = []
    n_chunks = unfolded.shape[-1]
    for frame_idx in range(n_chunks):
        frame = self.nnet(unfolded[..., frame_idx])
        if frame_idx == 0:
            assert frame.ndim == 3, "nnet should return (batch, n_src, time)"
            if self.n_src is not None:
                assert frame.shape[1] == self.n_src, "nnet should return (batch, n_src, time)"
            n_src = frame.shape[1]
        frame = frame.reshape(batch * n_src, -1)

        if frame_idx != 0 and self.reorder_chunks:
            frame = _reorder_sources(frame, out[-1], n_src, self.window_size, self.hop_size)

        if self.use_window:
            frame = frame * self.window
        else:
            frame = frame / (self.window_size / self.hop_size)
        out.append(frame)

    out = torch.stack(out).reshape(n_chunks, batch * n_src, self.window_size)
    out = out.permute(1, 2, 0)

    out = torch.nn.functional.fold(
        out,
        (n_frames, 1),
        kernel_size=(self.window_size, 1),
        padding=(self.window_size, 0),
        stride=(self.hop_size, 1),
    )
    return out.squeeze(-1).reshape(batch, n_src, -1)
Heart of the class: segment signal, apply func, combine with OLA.
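A runnable sketch under stated assumptions: the toy lambda below is a stand-in separation model returning (batch, n_src, time), and scipy is required for the 'hanning' window.

import torch
from asteroid.dsp.overlap_add import LambdaOverlapAdd

toy_nnet = lambda chunk: torch.stack([chunk, -chunk], dim=1)  # (batch, time) -> (batch, 2, time)

ola = LambdaOverlapAdd(nnet=toy_nnet, n_src=2, window_size=64000,
                       hop_size=None, window="hanning")
wav = torch.randn(1, 1, 500000)             # (batch, channels=1, time)
with torch.no_grad():
    sources = ola.ola_forward(wav)          # (batch, n_src, time)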
https://github.com/asteroid-team/asteroid/blob/64e10e9de840ada77719ff4fa280be42a19aa51c/asteroid/dsp/overlap_add.py#L84-L131
import torch from torch import nn from ..losses.pit_wrapper import PITReorder class LambdaOverlapAdd(torch.nn.Module): def __init__( self, nnet, n_src, window_size, hop_size=None, window="hanning", reorder_chunks=True, enable_grad=False, ): super().__init__() assert window_size % 2 == 0, "Window size must be even" self.nnet = nnet self.window_size = window_size self.hop_size = hop_size if hop_size is not None else window_size // 2 self.n_src = n_src self.in_channels = getattr(nnet, "in_channels", None) if window: from scipy.signal import get_window window = get_window(window, self.window_size).astype("float32") window = torch.from_numpy(window) self.use_window = True else: self.use_window = False self.register_buffer("window", window) self.reorder_chunks = reorder_chunks self.enable_grad = enable_grad
MIT License
conchylicultor/musicgenerator
deepmusic/modulemanager.py
ModuleManager.save
python
def save(self, config_group):
    config_group[self.name] = ' '.join([self.module_name] + self.module_parameters)
Save the current module parameters

Args:
    config_group (dict): dictionary where to write the configuration
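A usage sketch; the manager name 'batch_builder' and the saved value are placeholders, and register()/build_module() are assumed to have run beforehand:

from deepmusic.modulemanager import ModuleManager

manager = ModuleManager('batch_builder')
# ... manager.register(...) and manager.build_module(args) have populated module_name/parameters
config_group = {}
manager.save(config_group)
# config_group now maps 'batch_builder' to the chosen module name followed by its parameters,
# e.g. {'batch_builder': 'relative 96'}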
https://github.com/conchylicultor/musicgenerator/blob/adea76dccaba923b7d3807082ec6f5b512d16bb9/deepmusic/modulemanager.py#L111-L117
from collections import OrderedDict class ModuleManager: def __init__(self, name): self.name = name self.modules = OrderedDict() self.module_instance = None self.module_name = '' self.module_parameters = [] def register(self, module): assert not module.get_module_id() in self.modules self.modules[module.get_module_id()] = module def get_modules_ids(self): return self.modules.keys() def get_chosen_name(self): return self.module_name def get_module(self): assert self.module_instance is not None return self.module_instance def build_module(self, args): assert self.module_instance is None module_args = getattr(args, self.name) self.module_name = module_args[0] self.module_parameters = module_args[1:] self.module_instance = self.modules[self.module_name](args, *self.module_parameters) return self.module_instance def add_argparse(self, group_args, comment): assert len(self.modules.keys()) keys = list(self.modules.keys()) group_args.add_argument( '--{}'.format(self.name), type=str, nargs='+', default=[keys[0]], help=comment + ' Choices available: {}'.format(', '.join(keys)) )
Apache License 2.0
markblundeberg/openswap
lib/util.py
bh2u
python
def bh2u(x):
    return hfu(x).decode('ascii')
str with hex representation of a bytes-like object

>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020a'

:param x: bytes
:rtype: str
https://github.com/markblundeberg/openswap/blob/7de04aa80dab79bebe4b64483011dad70a48694c/lib/util.py#L356-L367
import binascii import os, sys, re, json from collections import defaultdict from datetime import datetime import decimal from decimal import Decimal import traceback import threading import hmac import stat from .i18n import _ import queue def inv_dict(d): return {v: k for k, v in d.items()} base_units = {'BCH':8, 'mBCH':5, 'cash':2} fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')] def normalize_version(v): return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] class NotEnoughFunds(Exception): pass class ExcessiveFee(Exception): pass class InvalidPassword(Exception): def __str__(self): return _("Incorrect password") class FileImportFailed(Exception): def __str__(self): return _("Failed to import file.") class FileImportFailedEncrypted(FileImportFailed): def __str__(self): return (_('Failed to import file.') + ' ' + _('Perhaps it is encrypted...') + '\n' + _('Importing encrypted files is not supported.')) class UserCancelled(Exception): pass class MyEncoder(json.JSONEncoder): def default(self, obj): from .transaction import Transaction if isinstance(obj, Transaction): return obj.as_dict() return super(MyEncoder, self).default(obj) class PrintError(object): def diagnostic_name(self): return self.__class__.__name__ def print_error(self, *msg): print_error("[%s]" % self.diagnostic_name(), *msg) def print_stderr(self, *msg): print_stderr("[%s]" % self.diagnostic_name(), *msg) def print_msg(self, *msg): print_msg("[%s]" % self.diagnostic_name(), *msg) class ThreadJob(PrintError): def run(self): pass class DebugMem(ThreadJob): def __init__(self, classes, interval=30): self.next_time = 0 self.classes = classes self.interval = interval def mem_stats(self): import gc self.print_error("Start memscan") gc.collect() objmap = defaultdict(list) for obj in gc.get_objects(): for class_ in self.classes: if isinstance(obj, class_): objmap[class_].append(obj) for class_, objs in objmap.items(): self.print_error("%s: %d" % (class_.__name__, len(objs))) self.print_error("Finish memscan") def run(self): if time.time() > self.next_time: self.mem_stats() self.next_time = time.time() + self.interval class DaemonThread(threading.Thread, PrintError): def __init__(self): threading.Thread.__init__(self) self.parent_thread = threading.currentThread() self.running = False self.running_lock = threading.Lock() self.job_lock = threading.Lock() self.jobs = [] def add_jobs(self, jobs): with self.job_lock: self.jobs.extend(jobs) def run_jobs(self): with self.job_lock: for job in self.jobs: try: job.run() except Exception as e: traceback.print_exc(file=sys.stderr) def remove_jobs(self, jobs): with self.job_lock: for job in jobs: self.jobs.remove(job) def start(self): with self.running_lock: self.running = True return threading.Thread.start(self) def is_running(self): with self.running_lock: return self.running and self.parent_thread.is_alive() def stop(self): with self.running_lock: self.running = False def on_stop(self): if 'ANDROID_DATA' in os.environ: try: import jnius jnius.detach() self.print_error("jnius detach") except ImportError: pass self.print_error("stopped") is_verbose = True def set_verbosity(b): global is_verbose is_verbose = b class cachedproperty(object): def __init__(self, f): self.f = f def __get__(self, obj, type): obj = obj or type value = self.f(obj) setattr(obj, self.f.__name__, value) return value def print_error(*args): if not is_verbose: return print_stderr(*args) def print_stderr(*args): args = [str(item) for item 
in args] sys.stderr.write(" ".join(args) + "\n") sys.stderr.flush() def print_msg(*args): args = [str(item) for item in args] sys.stdout.write(" ".join(args) + "\n") sys.stdout.flush() def json_encode(obj): try: s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder) except TypeError: s = repr(obj) return s def json_decode(x): try: return json.loads(x, parse_float=Decimal) except: return x def constant_time_compare(val1, val2): return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8')) def profiler(func): def do_profile(func, args, kw_args): n = func.__name__ t0 = time.time() o = func(*args, **kw_args) t = time.time() - t0 print_error("[profiler]", n, "%.4f"%t) return o return lambda *args, **kw_args: do_profile(func, args, kw_args) def android_ext_dir(): try: import jnius env = jnius.autoclass('android.os.Environment') except ImportError: from android.os import Environment as env return env.getExternalStorageDirectory().getPath() def android_data_dir(): try: import jnius context = jnius.autoclass('org.kivy.android.PythonActivity').mActivity except ImportError: from com.chaquo.python import Python context = Python.getPlatform().getApplication() return context.getFilesDir().getPath() + '/data' def android_headers_dir(): try: import jnius d = android_ext_dir() + '/org.electron.electron' if not os.path.exists(d): os.mkdir(d) return d except ImportError: return android_data_dir() def ensure_sparse_file(filename): if os.name == "nt": try: os.system("fsutil sparse setFlag \""+ filename +"\" 1") except: pass def get_headers_dir(config): return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path def assert_datadir_available(config_path): path = config_path if os.path.exists(path): return else: raise FileNotFoundError( 'Electron Cash datadir does not exist. Was it deleted while running?' + '\n' + 'Should be at {}'.format(path)) def assert_file_in_datadir_available(path, config_path): if os.path.exists(path): return else: assert_datadir_available(config_path) raise FileNotFoundError( 'Cannot find file but datadir is there.' + '\n' + 'Should be at {}'.format(path)) def assert_bytes(*args): try: for x in args: assert isinstance(x, (bytes, bytearray)) except: print('assert bytes failed', list(map(type, args))) raise def assert_str(*args): for x in args: assert isinstance(x, str) def to_string(x, enc): if isinstance(x, (bytes, bytearray)): return x.decode(enc) if isinstance(x, str): return x else: raise TypeError("Not a string or bytes like object") def to_bytes(something, encoding='utf8'): if isinstance(something, bytes): return something if isinstance(something, str): return something.encode(encoding) elif isinstance(something, bytearray): return bytes(something) else: raise TypeError("Not a string or bytes like object") bfh = bytes.fromhex hfu = binascii.hexlify
MIT License
spilchen/yahoo_fantasy_api
yahoo_fantasy_api/league.py
League.edit_date
python
def edit_date(self):
    if self.edit_date_cache is None:
        json = self.yhandler.get_settings_raw(self.league_id)
        t = objectpath.Tree(json)
        edit_key = t.execute('$..edit_key[0]')
        self.edit_date_cache = datetime.datetime.strptime(edit_key, '%Y-%m-%d').date()
    return self.edit_date_cache
Return the next day that you can edit the lineups.

:return: edit date
:rtype: :class: datetime.date
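A usage sketch; `sc` is assumed to be an authenticated yahoo_oauth session and the league key is a placeholder:

import yahoo_fantasy_api as yfa

lg = yfa.League(sc, '398.l.21063')   # placeholder league key
print(lg.edit_date())                # e.g. datetime.date(2020, 4, 1); cached after the first call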
https://github.com/spilchen/yahoo_fantasy_api/blob/867444eecffe46541c9c099f4ffc06ab5c178bd2/yahoo_fantasy_api/league.py#L579-L591
import yahoo_fantasy_api as yfa from yahoo_fantasy_api import yhandler import objectpath import datetime import re class League: def __init__(self, sc, league_id): self.sc = sc self.league_id = league_id self.yhandler = yhandler.YHandler(sc) self.current_week_cache = None self.end_week_cache = None self.week_date_range_cache = {} self.free_agent_cache = {} self.waivers_cache = None self.taken_players_cache = None self.stat_categories_cache = None self.settings_cache = None self.edit_date_cache = None self.positions_cache = None self.stats_id_map = None self.player_details_cache = {} def inject_yhandler(self, yhandler): self.yhandler = yhandler def to_team(self, team_key): tm = yfa.Team(self.sc, team_key) tm.inject_yhandler(self.yhandler) return tm def standings(self): json = self.yhandler.get_standings_raw(self.league_id) t = objectpath.Tree(json) num_teams = int(t.execute('$..count[0]')) standings = [] for i in range(num_teams): team = {} for e in t.execute('$..teams.."{}".team[0]'.format(i)): if isinstance(e, list): for td in e: if "team_key" in td or 'name' in td: self._merge_dicts(team, td, []) elif "team_standings" in e: self._merge_dicts(team, e['team_standings'], []) standings.append(team) return standings def teams(self): json = self.yhandler.get_standings_raw(self.league_id) t = objectpath.Tree(json) num_teams = int(t.execute('$..count[0]')) teams = {} for i in range(num_teams): team = {} key = None for e in t.execute('$..teams.."{}".team[0][0]'.format(i)): if "team_key" in e: key = e['team_key'] if isinstance(e, dict): self._merge_dicts(team, e, []) teams[key] = team return teams def matchups(self, week=None): json = self.yhandler.get_scoreboard_raw(self.league_id, week=week) return json def settings(self): if self.settings_cache is None: json = self.yhandler.get_settings_raw(self.league_id) data = {} if "fantasy_content" in json: content = json["fantasy_content"] if "league" in content: self._merge_dicts(data, content["league"][0], []) self._merge_dicts(data, content["league"][1]["settings"][0], ["roster_positions", "stat_categories"]) self.settings_cache = data return self.settings_cache def stat_categories(self): if self.stat_categories_cache is None: t = objectpath.Tree(self.yhandler.get_settings_raw(self.league_id)) json = t.execute('$..stat_categories..stat') simple_stat = [] for s in json: if 'is_only_display_stat' not in s: simple_stat.append({"display_name": s["display_name"], "position_type": s["position_type"]}) self.stat_categories_cache = simple_stat return self.stat_categories_cache def team_key(self): t = objectpath.Tree(self.yhandler.get_teams_raw()) json = t.execute('$..(team_key)') for t in json: if t['team_key'].startswith(self.league_id): return t['team_key'] def current_week(self): if self.current_week_cache is None: t = objectpath.Tree(self.yhandler.get_scoreboard_raw( self.league_id)) self.current_week_cache = int(t.execute('$..current_week[0]')) return self.current_week_cache def end_week(self): if self.end_week_cache is None: t = objectpath.Tree( self.yhandler.get_scoreboard_raw(self.league_id)) self.end_week_cache = int(t.execute('$..end_week[0]')) return self.end_week_cache def week_date_range(self, week): if week <= self.current_week() or week == 1: return self._date_range_of_played_or_current_week(week) elif week == self.current_week() + 1: (cur_st, cur_end) = self._date_range_of_played_or_current_week( week - 1) req_st = cur_end + datetime.timedelta(days=1) req_end = cur_end + datetime.timedelta(days=7) return (req_st, req_end) else: raise 
RuntimeError("Cannot request date range more than one week " "past the current week. The requested week is " "{}, but current week is {}.".format( week, self.current_week())) def free_agents(self, position): if position not in self.free_agent_cache: self.free_agent_cache[position] = self._fetch_players( 'FA', position=position) return self.free_agent_cache[position] def waivers(self): if not self.waivers_cache: self.waivers_cache = self._fetch_players('W') return self.waivers_cache def taken_players(self): if not self.taken_players_cache: self.taken_players_cache = self._fetch_players('T') return self.taken_players_cache def _fetch_players(self, status, position=None): PLAYERS_PER_PAGE = 25 plyrs = [] plyrIndex = 0 while plyrIndex % PLAYERS_PER_PAGE == 0: j = self.yhandler.get_players_raw(self.league_id, plyrIndex, status, position=position) (num_plyrs_on_pg, fa_on_pg) = self._players_from_page(j) if len(fa_on_pg) == 0: break plyrs += fa_on_pg plyrIndex += num_plyrs_on_pg return plyrs def _players_from_page(self, page): fa = [] if len(page['fantasy_content']['league'][1]['players']) == 0: return (0, fa) t = objectpath.Tree(page) pct_owns = self._pct_owned_from_page(iter(list(t.execute( '$..percent_owned.(coverage_type,value)')))) for i, pct_own in zip(range(0, t.execute('$..players.count[0]')*2, 2), pct_owns): path = '$..players..player[{}].'.format(i) + "(name,player_id,position_type,status,eligible_positions)" obj = list(t.execute(path)) plyr = {} for ele in obj: for k in ele.keys(): plyr[k] = ele[k] plyr['player_id'] = int(plyr['player_id']) plyr['name'] = plyr['name']['full'] plyr['eligible_positions'] = [e['position'] for e in plyr['eligible_positions']] plyr['percent_owned'] = pct_own if "status" not in plyr: plyr["status"] = "" if plyr["status"] != "NA": fa.append(plyr) return (i/2 + 1, fa) def _pct_owned_from_page(self, po_it): po = [] i = 0 try: while True: ele = next(po_it) if "coverage_type" in ele: po.append(0) i += 1 if "value" in ele: po[i-1] = ele['value'] except StopIteration: pass return po def _date_range_of_played_or_current_week(self, week): if week not in self.week_date_range_cache: t = objectpath.Tree(self.yhandler.get_scoreboard_raw( self.league_id, week)) j = t.execute('$..(week_start,week_end)[0]') self.week_date_range_cache[week] = ( datetime.datetime.strptime(j['week_start'], "%Y-%m-%d").date(), datetime.datetime.strptime(j['week_end'], "%Y-%m-%d").date()) return self.week_date_range_cache[week] def player_details(self, player): if isinstance(player, int): player = [player] self._cache_player_details(player) players = [] if isinstance(player, list): for p in player: players.append(self.player_details_cache[p]) elif player in self.player_details_cache: assert(isinstance(self.player_details_cache[player], list)) players = self.player_details_cache[player] return players def percent_owned(self, player_ids): t = objectpath.Tree(self.yhandler.get_percent_owned_raw( self.league_id, player_ids)) player_ids = t.execute("$..player_id") it = t.execute("$..(player_id,full,value)") po = [] try: while True: plyr = {"player_id": int(next(it)["player_id"]), "name": next(it)["full"], "percent_owned": next(it)["value"]} po.append(plyr) except StopIteration: pass return po def ownership(self, player_ids): t = objectpath.Tree(self.yhandler.get_player_ownership_raw(self.league_id, player_ids)) owner_details = t.execute("$..(player_id,ownership_type,owner_team_name)") ownership = {} try: while True: player_id = next(owner_details)['player_id'] ownership_details = next(owner_details) 
ownership[player_id] = ownership_details except StopIteration: pass return ownership
MIT License
iristyle/chocolateypackages
EthanBrown.SublimeText2.WebPackages/tools/PackageCache/SublimeLinter/sublimelinter/modules/libs/pyflakes/checker.py
Checker._runDeferred
python
def _runDeferred(self, deferred):
    for handler, scope in deferred:
        self.scopeStack = scope
        handler()
Run the callables in C{deferred} using their associated scope stack.
https://github.com/iristyle/chocolateypackages/blob/8c9833710577de6db6e8b1db5d9196e19e19d117/EthanBrown.SublimeText2.WebPackages/tools/PackageCache/SublimeLinter/sublimelinter/modules/libs/pyflakes/checker.py#L229-L235
import __builtin__ import os.path import _ast from pyflakes import messages try: import ast iter_child_nodes = ast.iter_child_nodes except (ImportError, AttributeError): def iter_child_nodes(node, astcls=_ast.AST): for name in node._fields: field = getattr(node, name, None) if isinstance(field, astcls): yield field elif isinstance(field, list): for item in field: yield item class Binding(object): def __init__(self, name, source): self.name = name self.source = source self.used = False def __str__(self): return self.name def __repr__(self): return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__, self.name, self.source.lineno, id(self)) class UnBinding(Binding): class Importation(Binding): def __init__(self, name, source): self.fullName = name name = name.split('.')[0] super(Importation, self).__init__(name, source) class Argument(Binding): class Assignment(Binding): class FunctionDefinition(Binding): pass class ExportBinding(Binding): def names(self): names = [] if isinstance(self.source, _ast.List): for node in self.source.elts: if isinstance(node, _ast.Str): names.append(node.s) return names class Scope(dict): importStarred = False def __repr__(self): return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), dict.__repr__(self)) def __init__(self): super(Scope, self).__init__() class ClassScope(Scope): pass class FunctionScope(Scope): def __init__(self): super(FunctionScope, self).__init__() self.globals = {} class ModuleScope(Scope): pass _MAGIC_GLOBALS = ['__file__', '__builtins__'] class Checker(object): nodeDepth = 0 traceTree = False def __init__(self, tree, filename='(none)'): self._deferredFunctions = [] self._deferredAssignments = [] self.dead_scopes = [] self.messages = [] self.filename = filename self.scopeStack = [ModuleScope()] self.futuresAllowed = True self.handleChildren(tree) self._runDeferred(self._deferredFunctions) self._deferredFunctions = None self._runDeferred(self._deferredAssignments) self._deferredAssignments = None del self.scopeStack[1:] self.popScope() self.check_dead_scopes() def deferFunction(self, callable): self._deferredFunctions.append((callable, self.scopeStack[:])) def deferAssignment(self, callable): self._deferredAssignments.append((callable, self.scopeStack[:]))
MIT License
artyompal/tpu_models
models/official/detection/evaluation/coco_utils.py
generate_annotation_file
python
def generate_annotation_file(groundtruth_generator, annotation_file):
    groundtruths = {}
    tf.logging.info('Loading groundtruth annotations from dataset to memory...')
    for groundtruth in groundtruth_generator():
        for k, v in six.iteritems(groundtruth):
            if k not in groundtruths:
                groundtruths[k] = [v]
            else:
                groundtruths[k].append(v)
    gt_dataset = convert_groundtruths_to_coco_dataset(groundtruths)

    tf.logging.info('Saving groundtruth annotations to the JSON file...')
    with tf.gfile.Open(annotation_file, 'w') as f:
        f.write(json.dumps(gt_dataset))
    tf.logging.info('Done saving the JSON file...')
Generates COCO-style annotation JSON file given a groundtruth generator.
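A usage sketch wiring this up with COCOGroundtruthGenerator from the same module; the file pattern, sample count and output path are placeholders:

groundtruth_generator = COCOGroundtruthGenerator(
    'gs://my-bucket/val-*.tfrecord', num_examples=5000, include_mask=False)
generate_annotation_file(groundtruth_generator, '/tmp/val_annotations.json')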
https://github.com/artyompal/tpu_models/blob/639306f30e085bb1cdb5b1118a4c96a2dbe14e3e/models/official/detection/evaluation/coco_utils.py#L345-L361
from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import json import numpy as np from PIL import Image from pycocotools import coco from pycocotools import mask as mask_utils import six import tensorflow as tf from dataloader import tf_example_decoder from utils import box_utils class COCOWrapper(coco.COCO): def __init__(self, eval_type='box', annotation_file=None, gt_dataset=None): if ((annotation_file and gt_dataset) or ((not annotation_file) and (not gt_dataset))): raise ValueError('One and only one of `annotation_file` and `gt_dataset` ' 'needs to be specified.') if eval_type not in ['box', 'mask']: raise ValueError('The `eval_type` can only be either `box` or `mask`.') coco.COCO.__init__(self, annotation_file=annotation_file) self._eval_type = eval_type if gt_dataset: self.dataset = gt_dataset self.createIndex() def loadRes(self, predictions): res = coco.COCO() res.dataset['images'] = copy.deepcopy(self.dataset['images']) res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) image_ids = [ann['image_id'] for ann in predictions] if set(image_ids) != (set(image_ids) & set(self.getImgIds())): raise ValueError('Results do not correspond to the current dataset!') for ann in predictions: x1, x2, y1, y2 = [ann['bbox'][0], ann['bbox'][0] + ann['bbox'][2], ann['bbox'][1], ann['bbox'][1] + ann['bbox'][3]] if self._eval_type == 'box': ann['area'] = ann['bbox'][2] * ann['bbox'][3] ann['segmentation'] = [ [x1, y1, x1, y2, x2, y2, x2, y1]] elif self._eval_type == 'mask': ann['bbox'] = mask_utils.toBbox(ann['segmentation']) ann['area'] = mask_utils.area(ann['segmentation']) res.dataset['annotations'] = copy.deepcopy(predictions) res.createIndex() return res def convert_predictions_to_coco_annotations(predictions): coco_predictions = [] num_batches = len(predictions['source_id']) batch_size = predictions['source_id'][0].shape[0] max_num_detections = predictions['detection_classes'][0].shape[1] for i in range(num_batches): for j in range(batch_size): for k in range(max_num_detections): ann = {} ann['image_id'] = predictions['source_id'][i][j] ann['category_id'] = predictions['detection_classes'][i][j, k] boxes = predictions['detection_boxes'][i] ann['bbox'] = [ boxes[j, k, 1], boxes[j, k, 0], boxes[j, k, 3] - boxes[j, k, 1], boxes[j, k, 2] - boxes[j, k, 0]] ann['score'] = predictions['detection_scores'][i][j, k] if 'detection_masks' in predictions: encoded_mask = mask_utils.encode( np.asfortranarray( predictions['detection_masks'][i][j, k].astype(np.uint8))) ann['segmentation'] = encoded_mask coco_predictions.append(ann) for i, ann in enumerate(coco_predictions): ann['id'] = i + 1 return coco_predictions def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None): source_ids = np.concatenate(groundtruths['source_id'], axis=0) heights = np.concatenate(groundtruths['height'], axis=0) widths = np.concatenate(groundtruths['width'], axis=0) gt_images = [{'id': int(i), 'height': int(h), 'width': int(w)} for i, h, w in zip(source_ids, heights, widths)] gt_annotations = [] num_batches = len(groundtruths['source_id']) batch_size = groundtruths['source_id'][0].shape[0] for i in range(num_batches): for j in range(batch_size): num_instances = groundtruths['num_detections'][i][j] for k in range(num_instances): ann = {} ann['image_id'] = int(groundtruths['source_id'][i][j]) if 'is_crowds' in groundtruths: ann['iscrowd'] = int(groundtruths['is_crowds'][i][j, k]) else: ann['iscrowd'] = 0 ann['category_id'] = 
int(groundtruths['classes'][i][j, k]) boxes = groundtruths['boxes'][i] ann['bbox'] = [ float(boxes[j, k, 1]), float(boxes[j, k, 0]), float(boxes[j, k, 3] - boxes[j, k, 1]), float(boxes[j, k, 2] - boxes[j, k, 0])] if 'areas' in groundtruths: ann['area'] = float(groundtruths['areas'][i][j, k]) else: ann['area'] = float( (boxes[j, k, 3] - boxes[j, k, 1]) * (boxes[j, k, 2] - boxes[j, k, 0])) if 'masks' in groundtruths: mask = Image.open(six.StringIO(groundtruths['masks'][i][j, k])) width, height = mask.size np_mask = ( np.array(mask.getdata()).reshape(height, width).astype(np.uint8)) np_mask[np_mask > 0] = 255 encoded_mask = mask_utils.encode(np.asfortranarray(np_mask)) ann['segmentation'] = encoded_mask if 'areas' not in groundtruths: ann['area'] = mask_utils.area(encoded_mask) gt_annotations.append(ann) for i, ann in enumerate(gt_annotations): ann['id'] = i + 1 if label_map: gt_categories = [{'id': i, 'name': label_map[i]} for i in label_map] else: category_ids = [gt['category_id'] for gt in gt_annotations] gt_categories = [{'id': i} for i in set(category_ids)] gt_dataset = { 'images': gt_images, 'categories': gt_categories, 'annotations': copy.deepcopy(gt_annotations), } return gt_dataset class COCOGroundtruthGenerator(object): def __init__(self, file_pattern, num_examples, include_mask): self._file_pattern = file_pattern self._num_examples = num_examples self._include_mask = include_mask self._dataset_fn = tf.data.TFRecordDataset def _parse_single_example(self, example): decoder = tf_example_decoder.TfExampleDecoder( include_mask=self._include_mask) decoded_tensors = decoder.decode(example) image = decoded_tensors['image'] image_size = tf.shape(image)[0:2] boxes = box_utils.denormalize_boxes( decoded_tensors['groundtruth_boxes'], image_size) groundtruths = { 'source_id': tf.string_to_number( decoded_tensors['source_id'], out_type=tf.int64), 'height': decoded_tensors['height'], 'width': decoded_tensors['width'], 'num_detections': tf.shape(decoded_tensors['groundtruth_classes'])[0], 'boxes': boxes, 'classes': decoded_tensors['groundtruth_classes'], 'is_crowds': decoded_tensors['groundtruth_is_crowd'], 'areas': decoded_tensors['groundtruth_area'], } if self._include_mask: groundtruths.update({ 'masks': decoded_tensors['groundtruth_instance_masks_png'], }) return groundtruths def _build_pipeline(self): dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False) dataset = dataset.apply( tf.data.experimental.parallel_interleave( lambda filename: self._dataset_fn(filename).prefetch(1), cycle_length=32, sloppy=False)) dataset = dataset.map(self._parse_single_example, num_parallel_calls=64) dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE) dataset = dataset.batch(1, drop_remainder=False) return dataset def __call__(self): with tf.Graph().as_default(): dataset = self._build_pipeline() groundtruth = dataset.make_one_shot_iterator().get_next() with tf.Session() as sess: for _ in range(self._num_examples): groundtruth_result = sess.run(groundtruth) yield groundtruth_result def scan_and_generator_annotation_file(file_pattern, num_samples, include_mask, annotation_file): groundtruth_generator = COCOGroundtruthGenerator( file_pattern, num_samples, include_mask) generate_annotation_file(groundtruth_generator, annotation_file)
Apache License 2.0
e-loue/pyke
pyke/target_pkg.py
target_pkg.reset
python
def reset(self, check_sources = True):
    if debug: print >> sys.stderr, "target_pkg.reset"
    self.dirty = False
    self.check_sources = check_sources
    self.source_packages = {}
    self.compiled_targets = set()
    self.rb_names = set()
This should be called once by engine.__init__ prior to calling add_source_package.
https://github.com/e-loue/pyke/blob/cfe95d8aaa06de123264f9b7f5bea20eb5924ecd/pyke/target_pkg.py#L180-L192
from __future__ import with_statement import os, os.path import time import sys import re import pyke debug = False Name_test = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$') class target_pkg(object): def __init__(self, module_name, filename = None, pyke_version = pyke.version, loader = None, sources = None, compiler_version = 0): self.package_name = module_name.rsplit('.', 1)[0] if sources is None: try: target_package_dir = os.path.dirname(import_(self.package_name).__file__) except ImportError: if debug: print >> sys.stderr, "target_pkg: no target package", self.package_name last_dot = self.package_name.rfind('.') if last_dot < 0: assert filename is not None package_parent_dir = os.path.dirname(os.path.dirname(filename)) else: package_parent_dir = os.path.dirname( import_(self.package_name[:last_dot]).__file__) if filename is not None: assert os.path.normpath( os.path.abspath(package_parent_dir)) == os.path.normpath( os.path.dirname(os.path.dirname(filename))), "Internal error: %r != %r" % ( os.path.normpath( os.path.abspath(package_parent_dir)), os.path.normpath( os.path.dirname(os.path.dirname(filename)))) if debug: print >> sys.stderr, "target_pkg package_parent_dir:", package_parent_dir target_package_dir = os.path.join(package_parent_dir, self.package_name[last_dot + 1:]) if debug: print >> sys.stderr, "target_pkg target_package_dir:", target_package_dir if not os.path.lexists(target_package_dir): if debug: print >> sys.stderr, "target_pkg: mkdir", target_package_dir os.mkdir(target_package_dir) init_filepath = os.path.join(target_package_dir, '__init__.py') if debug: print >> sys.stderr, "target_pkg init_filepath:", init_filepath if not os.path.lexists(init_filepath): if debug: print >> sys.stderr, "target_pkg: creating", init_filepath open(init_filepath, 'w').close() filename = os.path.join(target_package_dir, 'compiled_pyke_files.py') if filename.endswith('.py'): self.filename = filename else: self.filename = filename[:-1] self.directory = os.path.dirname(self.filename) if debug: print >> sys.stderr, "target_pkg:", self.package_name, self.filename self.loader = loader if compiler_version == pyke.compiler_version: self.sources = sources if sources is not None else {} elif self.loader is None: self.sources = {} else: raise AssertionError("%s: wrong version of pyke, " "running %s, compiled for %s" % (module_name, pyke.version, pyke_version))
MIT License
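A quick illustration of the call order described in the docstring above: reset() runs once, then source packages are registered. The sketch below uses invented stand-in names (it is not the real pyke API, whose target_pkg takes the constructor arguments shown in the context field).

class _TargetPkgSketch(object):
    # Stand-in for pyke.target_pkg.target_pkg, reduced to the reset pattern.
    def __init__(self):
        self.reset()

    def reset(self, check_sources=True):
        # Same bookkeeping fields as the method above: wipe per-run state.
        self.dirty = False
        self.check_sources = check_sources
        self.source_packages = {}
        self.compiled_targets = set()
        self.rb_names = set()

    def add_source_package(self, name, directory):
        # Hypothetical registration step, called only after reset().
        self.source_packages[name] = directory


pkg = _TargetPkgSketch()
pkg.reset(check_sources=True)                # engine.__init__ calls this once first
pkg.add_source_package("rules", "./rules")   # then registers source packages
print(sorted(pkg.source_packages))           # -> ['rules']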
zomux/deepy
deepy/trainers/base.py
NeuralTrainer.load_params
python
def load_params(self, path, exclude_free_params=False):
    self.network.load_params(path, exclude_free_params=exclude_free_params)
    self.best_params = self.copy_params()
    if self.network.train_logger.progress() > 0 or self.network.train_logger.epoch() > 0:
        self.skip(self.network.train_logger.progress(), self.network.train_logger.epoch() - 1)
Load parameters for the training. This method can load free parameters and resume the training progress.
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/trainers/base.py#L144-L153
import sys import time import numpy as np import theano from ..conf import TrainerConfig from ..core import env, runtime from ..utils import Timer from ..dataset import Dataset from controllers import TrainingController from abc import ABCMeta, abstractmethod from logging import getLogger logging = getLogger("trainer") class NeuralTrainer(object): __metaclass__ = ABCMeta def __init__(self, network, config=None, validator=None, annealer=None): super(NeuralTrainer, self).__init__() self.config = None if isinstance(config, TrainerConfig): self.config = config elif isinstance(config, dict): self.config = TrainerConfig(config) else: self.config = TrainerConfig() if type(self.config.learning_rate) == float: self.config.learning_rate = np.array(self.config.learning_rate, dtype=env.FLOATX) self.model = self.network = network self.network.prepare_training() self._setup_costs() self.evaluation_func = None self.validation_frequency = self.config.validation_frequency self.min_improvement = self.config.min_improvement self.patience = self.config.patience self._iter_controllers = [] self._epoch_controllers = [] if annealer: annealer.bind(self) self._epoch_controllers.append(annealer) if validator: validator.bind(self) self._iter_controllers.append(validator) self.best_cost = 1e100 self.best_epoch = 0 self.best_params = self.copy_params() self._skip_batches = 0 self._skip_epochs = 0 self._progress = 0 self.last_cost = 0 self.last_run_costs = None self._report_time = True self._epoch = 0 self._current_train_set = None self._current_valid_set = None self._current_test_set = None self._ended = False def _compile_evaluation_func(self): if not self.evaluation_func: logging.info("compile evaluation function") self.evaluation_func = theano.function( self.network.input_variables + self.network.target_variables, self.evaluation_variables, updates=self.network.updates, allow_input_downcast=True, mode=self.config.get("theano_mode", None)) def skip(self, n_batches, n_epochs=0): logging.info("skip %d epochs and %d batches" % (n_epochs, n_batches)) self._skip_batches = n_batches self._skip_epochs = n_epochs def epoch(self): return self._epoch def _setup_costs(self): self.cost = self._add_regularization(self.network.cost) self.test_cost = self._add_regularization(self.network.test_cost) self.training_variables = [self.cost] self.training_names = ['J'] for name, monitor in self.network.training_monitors: self.training_names.append(name) self.training_variables.append(monitor) logging.info("monitor list: %s" % ",".join(self.training_names)) self.evaluation_variables = [self.test_cost] self.evaluation_names = ['J'] for name, monitor in self.network.testing_monitors: self.evaluation_names.append(name) self.evaluation_variables.append(monitor) def _add_regularization(self, cost): if self.config.weight_l1 > 0: logging.info("L1 weight regularization: %f" % self.config.weight_l1) cost += self.config.weight_l1 * sum(abs(w).sum() for w in self.network.parameters) if self.config.hidden_l1 > 0: logging.info("L1 hidden unit regularization: %f" % self.config.hidden_l1) cost += self.config.hidden_l1 * sum(abs(h).mean(axis=0).sum() for h in self.network._hidden_outputs) if self.config.hidden_l2 > 0: logging.info("L2 hidden unit regularization: %f" % self.config.hidden_l2) cost += self.config.hidden_l2 * sum((h * h).mean(axis=0).sum() for h in self.network._hidden_outputs) return cost def set_params(self, targets, free_params=None): for param, target in zip(self.network.parameters, targets): param.set_value(target) if free_params: for 
param, param_value in zip(self.network.free_parameters, free_params): param.set_value(param_value) def save_params(self, path): self.set_params(*self.best_params) self.network.save_params(path)
MIT License
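For orientation, the resume behaviour above amounts to restoring weights and then skipping the epochs and batches the train logger says were already covered (skip(progress, epoch - 1)). The loop below is a self-contained sketch of that skip bookkeeping with made-up numbers; it is not deepy code.

# Hypothetical progress read back from a checkpoint's training log.
logged_epoch, logged_progress = 3, 120

# Mirrors trainer.skip(progress, epoch - 1): whole epochs before the logged one
# are skipped, and the partially finished epoch is fast-forwarded.
skip_epochs, skip_batches = logged_epoch - 1, logged_progress

trained = 0
for epoch in range(5):
    for batch in range(200):
        if epoch < skip_epochs or (epoch == skip_epochs and batch < skip_batches):
            continue          # already covered before the restart
        trained += 1          # a real train step would run here
print(trained)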
neuropycon/graphpype
graphpype/labeled_mask.py
compute_ROI_nii_from_ROI_coords_files
python
def compute_ROI_nii_from_ROI_coords_files(
        ref_img_file, MNI_coords_file, labels_file, neighbourhood=1):
    ref_image = nib.load(ref_img_file)
    ref_image_data = ref_image.get_data()
    ref_image_data_shape = ref_image_data.shape
    ref_image_data_sform = ref_image.get_sform()

    ROI_MNI_coords_list = np.array(np.loadtxt(
        MNI_coords_file), dtype='int').tolist()

    ROI_labels = [lign.strip() for lign in open(labels_file)]

    mni_sform_inv = np.linalg.inv(ref_image_data_sform)

    ROI_coords = np.array([_coord_transform(x, y, z, mni_sform_inv)
                           for x, y, z in ROI_MNI_coords_list], dtype="int64")

    for i, ROI_coord in enumerate(ROI_coords):
        ROI_coords_labelled_mask = np.zeros(
            shape=ref_image_data_shape, dtype='int64')

        neigh_range = list(range(-neighbourhood, neighbourhood+1))

        for relative_coord in iter.product(neigh_range, repeat=3):
            neigh_x, neigh_y, neigh_z = ROI_coord + relative_coord

            print(neigh_x, neigh_y, neigh_z)

            if check_np_dimension(ROI_coords_labelled_mask.shape,
                                  np.array([neigh_x, neigh_y, neigh_z],
                                           dtype='int64')):
                ROI_coords_labelled_mask[neigh_x, neigh_y, neigh_z] = 1

        print(ROI_coords_labelled_mask)

        path, fname, ext = split_f(MNI_coords_file)

        ROI_coords_labelled_mask_file = os.path.join(
            path, "ROI_{}-neigh_{}_2.nii".format(ROI_labels[i],
                                                 str(neighbourhood)))

        nib.save(nib.Nifti1Image(
            ROI_coords_labelled_mask, ref_image.affine, ref_image.header),
            ROI_coords_labelled_mask_file)

    return ROI_coords_labelled_mask_file
Export single file VOI binary nii image
https://github.com/neuropycon/graphpype/blob/409a370e7d293c3fcff0d733bf7af50850dfa9e4/graphpype/labeled_mask.py#L256-L309
import nipype.interfaces.spm as spm from nipype.utils.filemanip import split_filename as split_f from graphpype.utils import check_np_dimension import itertools as iter import numpy as np import nibabel as nib import glob import os from scipy import ndimage as ndimg from scipy.spatial.distance import cdist def _coord_transform(x, y, z, affine): coords = np.c_[np.atleast_1d(x).flat, np.atleast_1d(y).flat, np.atleast_1d(z).flat, np.ones_like(np.atleast_1d(z).flat)].T x, y, z, _ = np.dot(affine, coords) return x.squeeze(), y.squeeze(), z.squeeze() def create_indexed_mask(ref_img_file, MNI_coords_list, ROI_dir, ROI_mask_prefix="def", ROI_shape="cube", ROI_size=10): np_coord = np.array(MNI_coords_list) if len(np_coord.shape) > 1: dist = cdist(np_coord, np_coord, metric='euclidean') assert np.all(dist[np.triu_indices(dist.shape[0], k=1)] > ROI_size), "Error, distance < {}".format(ROI_size) ref_img = nib.load(ref_img_file) ref_img_shape = ref_img.get_data().shape if len(ref_img_shape) == 4: print("using 4D image for computing 3D mask, reducing shape") ref_img_shape = ref_img_shape[:-1] print(ref_img_shape) ref_img_affine = ref_img.affine inv_affine = np.linalg.inv(ref_img_affine) ref_img_hd = ref_img.header pixdims = ref_img_hd['pixdim'][1:4] indexed_mask_data = np.zeros(shape=ref_img_shape) - 1 if ROI_shape not in ["sphere", "cube"]: print("Warning, could not determine shape {}, using cube instead" .format(ROI_shape)) ROI_shape = "cube" if ROI_shape == "cube": print("ROI_shape = cube") vox_dims = list(map(int, float(ROI_size)/pixdims)) print(vox_dims) neigh_range = [] for vox_dim in vox_dims: vox_neigh = vox_dim/2 if vox_dim % 2 == 1: cur_range = np.arange(-vox_neigh, vox_neigh+1) elif vox_dim % 2 == 0: cur_range = np.arange(-vox_neigh+1, vox_neigh+1) neigh_range.append(cur_range) ROI_coords = [] for index_mask, MNI_coords in enumerate(MNI_coords_list): ijk_coord = _coord_transform(MNI_coords[0], MNI_coords[1], MNI_coords[2], inv_affine) neigh_coords = np.array( [list(i) for i in iter.product(*neigh_range)], dtype=int) cur_coords = np.array([list(map(int, ijk_coord + neigh_coord)) for neigh_coord in neigh_coords]) max_i, max_j, max_k = indexed_mask_data.shape keep = (0 <= cur_coords[:, 0]) & (cur_coords[:, 0] < max_i) & (0 <= cur_coords[:, 1]) & (cur_coords[:, 1] < max_j) & (0 <= cur_coords[:, 2]) & (cur_coords[:, 2] < max_k) if np.all(keep is False): continue indexed_mask_data[cur_coords[keep, 0], cur_coords[keep, 1], cur_coords[keep, 2]] = index_mask print(np.sum(indexed_mask_data == index_mask)) ROI_coords.append(ijk_coord) elif ROI_shape == "sphere": print("building spheres of {} mm".format(ROI_size)) radius = ROI_size/2.0 print(radius) vox_dims = list(map(int, float(radius)/pixdims)) print(vox_dims) r2_dim = [] neigh_range = [] for i, vox_dim in enumerate(vox_dims): pixdim = pixdims[i] cur_range = np.arange(-vox_dim, (vox_dim+1)) print(cur_range) cur_r2 = (cur_range*pixdim)**2 print(cur_r2) neigh_range.append(cur_range.tolist()) r2_dim.append(cur_r2) print(neigh_range) neigh_coords = np.array( [list(i) for i in iter.product(*neigh_range)], dtype=int) neigh_dist = np.array([np.sum(i) for i in iter.product(*r2_dim)]) neigh_range = neigh_coords[neigh_dist < radius**2] ROI_coords = [] for index_mask, MNI_coords in enumerate(MNI_coords_list): ijk_coord = np.dot(inv_affine, np.array( MNI_coords + [1], dtype='int'))[:-1] ROI_coords.append(ijk_coord) cur_coords = np.array([list(map(int, ijk_coord + neigh_coord)) for neigh_coord in neigh_range.tolist()]) indexed_mask_data[cur_coords[:, 0], 
cur_coords[:, 1], cur_coords[:, 2]] = index_mask print(np.sum(indexed_mask_data == index_mask)) try: os.makedirs(ROI_dir) except OSError: print("directory already created") indexed_mask_file = os.path.join( ROI_dir, "indexed_mask-" + ROI_mask_prefix + ".nii") nib.save(nib.Nifti1Image(indexed_mask_data, ref_img_affine), indexed_mask_file) ROI_coords_file = os.path.join( ROI_dir, "ROI_coords-" + ROI_mask_prefix + ".txt") np.savetxt(ROI_coords_file, np.array(ROI_coords, dtype=int), fmt="%d") return indexed_mask_file
BSD 3-Clause New or Revised License
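The core step in the function above is mapping MNI (world-space) coordinates to voxel indices through the inverse of the reference image's sform. The snippet below is a minimal, runnable sketch of that conversion using an in-memory image and made-up coordinates instead of the files the function expects; nibabel's apply_affine stands in for the module's _coord_transform helper.

import numpy as np
import nibabel as nib
from nibabel.affines import apply_affine

# In-memory reference image; the affine's translation stands in for the sform
# that the function loads from `ref_img_file` (values here are made up).
affine = np.eye(4)
affine[:3, 3] = [-10.0, -10.0, -10.0]
ref_img = nib.Nifti1Image(np.zeros((20, 20, 20), dtype=np.int16), affine)

mni_coords = np.array([[0.0, 5.0, -3.0]])   # hypothetical MNI coordinates
inv_affine = np.linalg.inv(ref_img.affine)

voxel_coords = apply_affine(inv_affine, mni_coords).astype(int)
print(voxel_coords)                          # -> [[10 15  7]]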
sanic-org/sanic
sanic/server/socket.py
remove_unix_socket
python
def remove_unix_socket(path: Optional[str]) -> None:
    if not path:
        return
    try:
        if stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
            with socket.socket(socket.AF_UNIX) as testsock:
                try:
                    testsock.connect(path)
                except ConnectionRefusedError:
                    os.unlink(path)
    except FileNotFoundError:
        pass
Remove dead unix socket during server exit.
https://github.com/sanic-org/sanic/blob/3262878ebd41aa2230ef15d4475bbcf223b2356b/sanic/server/socket.py#L74-L87
from __future__ import annotations import os import secrets import socket import stat from ipaddress import ip_address from typing import Optional def bind_socket(host: str, port: int, *, backlog=100) -> socket.socket: try: ip = ip_address(host) host = str(ip) sock = socket.socket( socket.AF_INET6 if ip.version == 6 else socket.AF_INET ) except ValueError: sock = socket.socket() sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((host, port)) sock.listen(backlog) return sock def bind_unix_socket(path: str, *, mode=0o666, backlog=100) -> socket.socket: path = os.path.abspath(path) folder = os.path.dirname(path) if not os.path.isdir(folder): raise FileNotFoundError(f"Socket folder does not exist: {folder}") try: if not stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode): raise FileExistsError(f"Existing file is not a socket: {path}") except FileNotFoundError: pass tmp_path = f"{path}.{secrets.token_urlsafe()}" sock = socket.socket(socket.AF_UNIX) try: sock.bind(tmp_path) try: os.chmod(tmp_path, mode) sock.listen(backlog) os.rename(tmp_path, path) except: try: os.unlink(tmp_path) finally: raise except: try: sock.close() finally: raise return sock
MIT License
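The check performed by remove_unix_socket can be read in isolation: a path is only unlinked when it is a socket inode and a connection attempt is refused, i.e. nothing is listening on it anymore. Below is a stdlib-only sketch of that test; the helper name and the example path are invented.

import os
import socket
import stat

def looks_like_dead_unix_socket(path: str) -> bool:
    # The path must be a socket inode, and connecting to it must be refused
    # (no server is listening), mirroring the check in remove_unix_socket.
    try:
        if not stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
            return False
    except FileNotFoundError:
        return False
    with socket.socket(socket.AF_UNIX) as testsock:
        try:
            testsock.connect(path)
        except ConnectionRefusedError:
            return True
    return False

print(looks_like_dead_unix_socket("/tmp/hypothetical.sock"))  # False unless such a stale socket exists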
alexmohr/sonyapilib
tests/device_test.py
SonyDeviceTest.create_device
python
def create_device():
    sonyapilib.device.TIMEOUT = 0.1
    device = SonyDevice("test", "test")
    device.api_version = 3
    device.cookies = jsonpickle.decode(read_file("data/cookies.json"))
    return device
Create a new device instance
https://github.com/alexmohr/sonyapilib/blob/50fd5839e5ffe057c472ae41d3c40e98b92b55a0/tests/device_test.py#L898-L904
import os.path import sys import unittest from inspect import getsourcefile from unittest import mock from urllib.parse import ( urljoin ) import jsonpickle from requests import HTTPError, URLRequired, RequestException from tests.testutil import read_file current_dir = os.path.dirname(os.path.abspath(getsourcefile(lambda: 0))) sys.path.insert(0, current_dir[:current_dir.rfind(os.path.sep)]) import sonyapilib.device from sonyapilib.ssdp import SSDPResponse from sonyapilib.device import SonyDevice, XmlApiObject, AuthenticationResult sys.path.pop(0) ACTION_LIST_URL = 'http://192.168.240.4:50002/actionList' ACTION_LIST_URL_2 = 'http://192.168.240.4:50002/actionList2' DMR_URL = 'http://test:52323/dmr.xml' IRCC_URL = 'http://test:50001/Ircc.xml' IRCC_URL_NO_SCHEMA = 'http://test_no_schema:50001/Ircc.xml' IRCC_URL_MISSING_INFO = 'http://test_missing_info:50001/Ircc.xml' SYSTEM_INFORMATION_URL = 'http://192.168.240.4:50002/getSystemInformation' SYSTEM_INFORMATION_URL_V4 = 'http://test/sony/system' GET_REMOTE_COMMAND_LIST_URL = 'http://192.168.240.4:50002/getRemoteCommandList' REGISTRATION_URL_LEGACY = 'http://192.168.240.4:50002/register' REGISTRATION_URL_V4 = 'http://192.168.170.23/sony/accessControl' REGISTRATION_URL_V4_FAIL = 'http://192.168.170.22/sony/accessControl' REGISTRATION_URL_V4_FAIL_401 = 'http://192.168.170.25/sony/accessControl' REGISTRATION_URL_V3_FAIL_401 = 'http://192.168.240.7:50002/register' COMMAND_LIST_V4 = 'http://192.168.240.4:50002/getRemoteCommandList' APP_LIST_URL = 'http://test:50202/appslist' APP_LIST_URL_V4 = 'http://test/DIAL/sony/applist' APP_START_URL_LEGACY = 'http://test:50202/apps/' APP_START_URL = 'http://test/DIAL/apps/' SOAP_URL = 'http://test/soap' GET_REMOTE_CONTROLLER_INFO_URL = "http://test/getRemoteControllerInfo" BASE_URL = 'http://test/sony' AV_TRANSPORT_URL = 'http://test:52323/upnp/control/AVTransport' AV_TRANSPORT_URL_NO_MEDIA = 'http://test2:52323/upnp/control/AVTransport' REQUESTS_ERROR = 'http://ERROR' ACTION_LIST = [ "getText", "sendText", "getContentInformation", "getSystemInformation", "getRemoteCommandList", "getStatus", "getHistoryList", "getContentUrl", "sendContentUrl" ] def mocked_return_none(*args, **kwargs): return None def mock_request_error(*args, **kwargs): raise HTTPError() def mock_error(*args, **kwargs): raise Exception() def mock_request_exception(*args, **kwargs): raise RequestException("Test Exception") def mock_nothing(*args, **kwargs): pass def mock_register_success(*args, **kwargs): return AuthenticationResult.SUCCESS def mock_discovery(*args, **kwargs): if args[0] == "urn:schemas-sony-com:service:IRCC:1": resp = SSDPResponse(None) resp.location = IRCC_URL return [resp] return None class MockResponseJson: def __init__(self, data): self.data = data def get(self, key): if key in self.data: return self.data[key] return None class MockResponse: def __init__(self, json_data, status_code, text=None, cookies=None): self.json_obj = MockResponseJson(json_data) self.status_code = status_code self.text = text self.cookies = cookies if text: self.content = text.encode() def json(self): return self.json_obj def raise_for_status(self): if self.status_code == 200: return error = HTTPError() error.response = self raise error def mocked_requests_empty(*args, **kwargs): return {} def mocked_requests_post(*args, **kwargs): url = args[0] print("POST for URL: {}".format(url)) if not url: raise URLRequired() elif url == REGISTRATION_URL_V4: return MockResponse({}, 200) elif url == REGISTRATION_URL_V4_FAIL: return MockResponse({"error": 402}, 200) 
elif url == REGISTRATION_URL_V4_FAIL_401: MockResponse(None, 401).raise_for_status() elif url == SOAP_URL: return MockResponse({}, 200, "data") elif url == urljoin(BASE_URL, 'system'): result = MockResponseJson({"status": "on"}) return MockResponse({"result": [result]}, 200) elif APP_START_URL_LEGACY in url: return MockResponse(None, 200) elif APP_START_URL in url: return MockResponse(None, 200) elif url == AV_TRANSPORT_URL: return MockResponse(None, 200, read_file( 'data/playing_status_legacy_playing.xml')) elif url == AV_TRANSPORT_URL_NO_MEDIA: return MockResponse(None, 200, read_file( 'data/playing_status_legacy_no_media.xml')) elif url == COMMAND_LIST_V4: json_data = jsonpickle.decode(read_file('data/commandList.json')) return MockResponse(json_data, 200, "") elif url == SYSTEM_INFORMATION_URL_V4: json_data = jsonpickle.decode(read_file('data/systemInformation.json')) return MockResponse(json_data, 200, "") elif url.startswith(REQUESTS_ERROR): raise RequestException else: raise ValueError("Unknown url requested: {}".format(url)) def mocked_requests_get(*args, **kwargs): url = args[0] print("GET for URL: {}".format(url)) if url == DMR_URL: return MockResponse(None, 200, read_file("data/dmr_v3.xml")) elif url == IRCC_URL: return MockResponse(None, 200, read_file("data/ircc.xml")) elif url == IRCC_URL_MISSING_INFO: return MockResponse(None, 200, read_file("data/ircc_missing_info.xml")) elif url == IRCC_URL_NO_SCHEMA: return MockResponse(None, 200, read_file("data/ircc_no_schema.xml")) elif url == ACTION_LIST_URL: return MockResponse(None, 200, read_file("data/actionlist.xml")) elif url == ACTION_LIST_URL_2: return MockResponse(None, 200, read_file("data/actionlist_no_url.xml")) elif url == SYSTEM_INFORMATION_URL: return MockResponse(None, 200, read_file("data/getSysteminformation.xml")) elif url == GET_REMOTE_COMMAND_LIST_URL: return MockResponse(None, 200, read_file("data/getRemoteCommandList.xml")) elif url == APP_LIST_URL or url == APP_LIST_URL_V4: return MockResponse(None, 200, read_file("data/appsList.xml")) elif url == REGISTRATION_URL_LEGACY: return MockResponse({}, 200) elif url == REGISTRATION_URL_V3_FAIL_401: MockResponse(None, 401).raise_for_status() elif url == GET_REMOTE_CONTROLLER_INFO_URL: return MockResponse(None, 200) elif url.startswith(REQUESTS_ERROR): raise RequestException() else: raise ValueError("Unknown url requested: {}".format(url)) return MockResponse(None, 404) class SonyDeviceTest(unittest.TestCase): @mock.patch('sonyapilib.device.SonyDevice._update_service_urls', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._recreate_authentication', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._update_commands', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._update_applist', side_effect=mock_nothing) def test_init_device_no_pin(self, mock_update_applist, mock_update_command, mock_recreate_auth, mock_update_service_url): device = self.create_device() device.init_device() self.assertEqual(mock_update_service_url.call_count, 1) self.assertEqual(mock_recreate_auth.call_count, 0) self.assertEqual(mock_update_command.call_count, 1) self.assertEqual(mock_update_applist.call_count, 0) @mock.patch('sonyapilib.device.SonyDevice._update_service_urls', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._recreate_authentication', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._update_commands', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._update_applist', 
side_effect=mock_nothing) def test_init_device_with_pin(self, mock_update_applist, mock_update_command, mock_recreate_auth, mock_update_service_url): device = self.create_device() device.pin = 1234 device.init_device() self.assertEqual(mock_update_service_url.call_count, 1) self.assertEqual(mock_recreate_auth.call_count, 1) self.assertEqual(mock_update_command.call_count, 1) self.assertEqual(mock_update_applist.call_count, 1) @mock.patch('sonyapilib.ssdp.SSDPDiscovery.discover', side_effect=mock_discovery) def test_discovery(self, mock_discover): devices = SonyDevice.discover() self.assertEqual(len(devices), 1) self.assertEqual(devices[0].host, "test") def test_save_load_from_json(self): device = self.create_device() jdata = device.save_to_json() restored_device = SonyDevice.load_from_json(jdata) jdata_restored = restored_device.save_to_json() self.assertEqual(jdata, jdata_restored) self.assertEqual(restored_device.client_id, device.client_id) def test_update_service_urls_error_response(self): device = self.create_device() device._update_service_urls() @mock.patch('requests.get', side_effect=mocked_requests_get) @mock.patch('sonyapilib.device.SonyDevice._parse_ircc', side_effect=mock_error) def test_update_service_urls_error_processing(self, mock_error, mocked_requests_get): device = self.create_device() device._update_service_urls() self.assertEqual(mock_error.call_count, 1) @mock.patch('sonyapilib.device.SonyDevice._send_http', side_effect=mock_request_exception) def test_update_service_urls_request_exception(self, mock_request_exception): device = self.create_device() device._update_service_urls() self.assertEqual(mock_request_exception.call_count, 1) @mock.patch('requests.get', side_effect=mocked_requests_get) @mock.patch('sonyapilib.device.SonyDevice._parse_ircc', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._parse_action_list', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._parse_system_information', side_effect=mock_nothing) def test_update_service_urls_v3(self, mock_ircc, mock_action_list, mock_system_information, mocked_requests_get): device = self.create_device() device.pin = 1234 device._update_service_urls() self.assertEqual(mock_ircc.call_count, 1) self.assertEqual(mock_action_list.call_count, 1) self.assertEqual(mock_system_information.call_count, 1) @mock.patch('requests.get', side_effect=mocked_requests_get) @mock.patch('requests.post', side_effect=mocked_requests_post) def test_update_service_urls_v4(self, mocked_requests_post, mocked_requests_get): device = self.create_device() device.pin = 1234 device.api_version = 4 device._update_service_urls() self.assertEqual(device.mac, "10:08:B1:31:81:B5") @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_dmr_v3(self, mock_get): content = read_file("data/dmr_v3.xml") device = self.create_device() device._parse_dmr(content) self.verify_device_dmr(device) self.assertLess(device.api_version, 4) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_dmr_v4(self, mock_get): content = read_file("data/dmr_v4.xml") device = self.create_device() device._parse_dmr(content) self.verify_device_dmr(device) self.assertGreater(device.api_version, 3) self.assertEqual( device.actions["register"].url, REGISTRATION_URL_V4) self.assertEqual(device.actions["register"].mode, 4) self.assertEqual( device.actions["getRemoteCommandList"].url, 'http://192.168.170.23/sony/system') def test_parse_ircc_error(self): device = self.create_device() with 
self.assertRaises(RequestException): device._parse_ircc() @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_ircc(self, mock_get): device = self.create_device() device._parse_ircc() self.assertEqual( device.actionlist_url, ACTION_LIST_URL) self.assertEqual( device.control_url, 'http://test:50001/upnp/control/IRCC') @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_ircc_no_schema(self, mock_get): device = self.create_device() device.ircc_url = IRCC_URL_NO_SCHEMA device._parse_ircc() self.assertEqual( device.actionlist_url, ACTION_LIST_URL) self.assertEqual( device.control_url, 'http://test:50001/upnp/control/IRCC') @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_ircc_no_missing_info(self, mock_get): device = self.create_device() device.ircc_url = IRCC_URL_MISSING_INFO device._parse_ircc() self.assertEqual( device.actionlist_url, ACTION_LIST_URL) self.assertEqual( device.control_url, 'http://test:50001/upnp/control/IRCC') def test_parse_action_list_error(self): device = self.create_device() device.actionlist_url = ACTION_LIST_URL device._parse_action_list() @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_action_list(self, mock_get): device = self.create_device() device.actionlist_url = ACTION_LIST_URL device._parse_action_list() self.assertEqual(device.actions["register"].mode, 3) base_url = "http://192.168.240.4:50002/" for action in ACTION_LIST: self.assertEqual(device.actions[action].url, base_url + action) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_action_list_without_url(self, mock_get): device = self.create_device() device.actionlist_url = ACTION_LIST_URL_2 device._parse_action_list() self.assertEqual(device.actions["register"].mode, 3) for action in ACTION_LIST: action_url = "{}?action={}".format(ACTION_LIST_URL_2, action) self.assertEqual(device.actions[action].url, action_url) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_parse_system_information(self, mock_get): device = self.create_device() data = XmlApiObject({}) data.url = SYSTEM_INFORMATION_URL device.actions["getSystemInformation"] = data device._parse_system_information() self.assertEqual(device.mac, "30-52-cb-cc-16-ee") @mock.patch('requests.post', side_effect=mocked_requests_empty) def test_parse_sys_info_error(self, mock_get): device = self.create_device() data = XmlApiObject({}) data.url = SYSTEM_INFORMATION_URL device.actions["getSystemInformation"] = data device._parse_system_information() self.assertEqual(device.mac, None) def prepare_test_action_list(self): device = self.create_device() data = XmlApiObject({}) data.url = GET_REMOTE_COMMAND_LIST_URL device.actions["getRemoteCommandList"] = data return device def test_parse_command_list_error(self): versions = [1, 2, 3, 4] for version in versions: device = self.prepare_test_action_list() device.api_version = version if version < 4: device._parse_command_list() else: device._parse_command_list_v4() @mock.patch('requests.get', side_effect=mocked_requests_get) @mock.patch('requests.post', side_effect=mocked_requests_post) def test_parse_command_list(self, mock_get, mock_post): versions = [1, 2, 3, 4] for version in versions: device = self.prepare_test_action_list() device.version = version if version < 4: cmd_length = 48 device._parse_command_list() else: cmd_length = 98 device._parse_command_list_v4() self.assertTrue("Power" in device.commands) self.assertTrue("Left" in device.commands) self.assertTrue("Pause" 
in device.commands) self.assertTrue("Num3" in device.commands) self.assertEqual(len(device.commands), cmd_length) @mock.patch('sonyapilib.device.SonyDevice._parse_command_list', side_effect=mock_nothing) def test_update_commands_no_pin(self, mock_parse_cmd_list): device = self.create_device() device._update_commands() self.assertEqual(mock_parse_cmd_list.call_count, 1) @mock.patch('sonyapilib.device.SonyDevice._use_builtin_command_list', side_effect=mock_nothing) def test_update_commands_v0(self, mock_parse_cmd_list): device = self.create_device() device.api_version = 0 device._update_commands() self.assertEqual(mock_parse_cmd_list.call_count, 1) @mock.patch('sonyapilib.device.SonyDevice._parse_command_list', side_effect=mock_nothing) def test_update_commands_v3(self, mock_parse_cmd_list): device = self.create_device() device.pin = 1234 device._update_commands() self.assertEqual(mock_parse_cmd_list.call_count, 1) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_update_commands_v4(self, mock_get): device = self.create_device() device.pin = 1234 device.api_version = 4 action = XmlApiObject({}) action.url = GET_REMOTE_CONTROLLER_INFO_URL device.actions["getRemoteCommandList"] = action device._update_commands() def start_app(self, device, app_name, mock_post, mock_send_command): versions = [1, 2, 3, 4] apps = { "Video Explorer": "com.sony.videoexplorer", "Music Explorer": "com.sony.musicexplorer", "Video Player": "com.sony.videoplayer", "Music Player": "com.sony.musicplayer", "PlayStation Video": "com.sony.videounlimited", "Amazon Prime Video": "com.sony.iptv.4976", "Netflix": "com.sony.iptv.type.NRDP", "Rakuten TV": "com.sony.iptv.3479", "Tagesschau": "com.sony.iptv.type.EU-TAGESSCHAU_6x3", "Functions with Gracenote ended": "com.sony.iptv.6317", "watchmi Themenkanäle": "com.sony.iptv.4766", "Netzkino": "com.sony.iptv.4742", "MUBI": "com.sony.iptv.5498", "WWE Network": "com.sony.iptv.4340", "DW for Smart TV": "com.sony.iptv.4968", "YouTube": "com.sony.iptv.type.ytleanback", "uStudio": "com.sony.iptv.4386", "Meteonews TV": "com.sony.iptv.3487", "Digital Concert Hall": "com.sony.iptv.type.WW-BERLINPHIL_NBIV", "Activate Enhanced Features": "com.sony.iptv.4834" } for version in versions: device.api_version = version device.start_app(app_name) self.assertEqual(mock_post.call_count, 1) self.assertEqual(mock_send_command.call_count, 1) if version < 4: url = APP_START_URL_LEGACY + apps[app_name] else: url = APP_START_URL + apps[app_name] self.assertEqual(url, mock_post.call_args[0][0]) mock_send_command.call_count = 0 mock_post.call_count = 0 mock_post.mock_calls.clear() @mock.patch('sonyapilib.device.SonyDevice._send_command', side_effect=mock_nothing) @mock.patch('requests.post', side_effect=mocked_requests_post) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_update_applist(self, mock_get, mock_post, mock_send_command): device = self.create_device() app_list = [ "Video Explorer", "Music Explorer", "Video Player", "Music Player", "PlayStation Video", "Amazon Prime Video", "Netflix", "Rakuten TV", "Tagesschau", "Functions with Gracenote ended", "watchmi Themenkanäle", "Netzkino", "MUBI", "WWE Network", "DW for Smart TV", "YouTube", "uStudio", "Meteonews TV", "Digital Concert Hall", "Activate Enhanced Features" ] versions = [1, 2, 3, 4] for version in versions: device.api_version = version device._update_applist() for app in device.get_apps(): self.assertTrue(app in app_list) self.start_app(device, app, mock_post, mock_send_command) 
self.assertEqual(len(device.apps), len(app_list)) def test_recreate_authentication_no_auth(self): versions = [1, 2] for version in versions: device = self.create_device() self.add_register_to_device(device, version) device._recreate_authentication() self.assertEqual(len(device.headers), 2) self.assertTrue(device.headers['X-CERS-DEVICE-ID'] == device.nickname) self.assertTrue(device.headers['X-CERS-DEVICE-INFO'] == device.nickname) def test_recreate_authentication_v3(self): device = self.create_device() device.pin = 1234 self.add_register_to_device(device, 3) device._recreate_authentication() self.assertEqual(device.headers["Authorization"], "Basic OjEyMzQ=") self.assertEqual(device.headers["X-CERS-DEVICE-ID"], device.client_id) def test_recreate_authentication_v4(self): device = self.create_device() device.pin = 1234 self.add_register_to_device(device, 4) device._recreate_authentication() self.assertEqual(device.headers["Authorization"], "Basic OjEyMzQ=") self.assertEqual(device.headers["Connection"], "keep-alive") self.verify_cookies(device) def test_recreate_authentication_v4_psk(self): device = SonyDevice("test", "test", "foobarPSK") device.pin = 1234 self.add_register_to_device(device, 4) device._recreate_authentication() self.assertTrue(device.psk) self.assertEqual(device.headers["X-Auth-PSK"], device.psk) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_register_no_auth(self, mocked_get): versions = [1, 2] for version in versions: result = self.register_with_version(version) self.assertEqual(result[0], AuthenticationResult.SUCCESS) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_register_no_auth_error(self, mocked_get): device = self.create_device() register_action = XmlApiObject({}) register_action.url = REQUESTS_ERROR self.assertEqual(AuthenticationResult.ERROR, device._register_without_auth(register_action)) @mock.patch('sonyapilib.device.SonyDevice.init_device', side_effect=mock_nothing) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_register_not_supported(self, mocked_get, mocked_init_device): with self.assertRaises(ValueError): self.register_with_version(5) self.assertEqual(mocked_init_device.call_count, 0) def verify_register_fail(self, version, auth_result, mocked_init_device, url=None, pin=-1): if pin != -1: result = self.register_with_version(version, url) else: result = self.register_with_version(version, url, pin=pin) self.assertEqual(result[0], auth_result) self.assertEqual(mocked_init_device.call_count, 0) @mock.patch('sonyapilib.device.SonyDevice.init_device', side_effect=mock_nothing) def test_register_fail_http_timeout(self, mocked_init_device, pin=-1): versions = [1, 2, 3, 4] for version in versions: if pin != -1: self.verify_register_fail(version, AuthenticationResult.ERROR, mocked_init_device) @mock.patch('requests.get', side_effect=mocked_requests_get) @mock.patch('requests.post', side_effect=mocked_requests_post) @mock.patch('sonyapilib.device.SonyDevice.init_device', side_effect=mock_nothing) def test_register_fail_pin_needed(self, mocked_init_device, mock_request_get_401, mock_request_post_401): self.verify_register_fail(3, AuthenticationResult.PIN_NEEDED, mocked_init_device, REGISTRATION_URL_V3_FAIL_401) self.verify_register_fail(4, AuthenticationResult.PIN_NEEDED, mocked_init_device, REGISTRATION_URL_V4_FAIL_401, pin=None) self.verify_register_fail(4, AuthenticationResult.PIN_NEEDED, mocked_init_device, REGISTRATION_URL_V4_FAIL_401) @mock.patch('sonyapilib.device.SonyDevice.init_device', 
side_effect=mock_nothing) @mock.patch('requests.get', side_effect=mocked_requests_get) def test_register_success_v3(self, mocked_requests_get, mocked_init_device): result = self.register_with_version(3) self.assertEqual(result[0], AuthenticationResult.SUCCESS) self.assertEqual(mocked_init_device.call_count, 1) @mock.patch('sonyapilib.device.SonyDevice.init_device', side_effect=mock_nothing) @mock.patch('requests.post', side_effect=mocked_requests_post) def test_register_no_json_v4(self, mocked_requests_post, mocked_init_device): result = self.register_with_version(4, REGISTRATION_URL_V4_FAIL) self.assertEqual(result[0], AuthenticationResult.ERROR) self.assertEqual(mocked_init_device.call_count, 0) @mock.patch('sonyapilib.device.SonyDevice.init_device', side_effect=mock_nothing) @mock.patch('requests.post', side_effect=mocked_requests_post) def test_register_success_v4(self, mocked_requests_post, mocked_init_device): result = self.register_with_version(4, REGISTRATION_URL_V4) self.assertEqual(result[0], AuthenticationResult.SUCCESS) self.assertEqual(mocked_init_device.call_count, 1) @mock.patch('sonyapilib.device.SonyDevice.register', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._recreate_authentication', side_effect=mock_nothing) def test_send_authentication_no_auth(self, mock_register, mock_recreate_auth): versions = [[1, True], [2, False], [3, False], [4, False]] for version in versions: device = self.create_device() self.add_register_to_device(device, version[0]) self.assertEqual(device.send_authentication(0), version[1]) self.assertEqual(mock_register.call_count, 0) self.assertEqual(mock_recreate_auth.call_count, 0) @mock.patch('sonyapilib.device.SonyDevice.register', side_effect=mock_register_success) @mock.patch('sonyapilib.device.SonyDevice._recreate_authentication', side_effect=mock_nothing) def test_send_authentication_with_auth(self, mock_register, mock_recreate_auth): versions = [3, 4] for version in versions: device = self.create_device() self.add_register_to_device(device, version) self.assertTrue(device.send_authentication(1234)) self.assertEqual(mock_register.call_count, 1) self.assertEqual(mock_recreate_auth.call_count, 1) mock_register.call_count = 0 mock_recreate_auth.call_count = 0 @mock.patch('sonyapilib.device.SonyDevice._send_command', side_effect=mock_nothing) def test_commands(self, mock_send_command): device = self.create_device() methods = ["up", "confirm", "down", "right", "left", "home", "options", "returns", "num1", "num2", "num3", "num4", "num5", "num6", "num7", "num8", "num9", "num0", "display", "audio", "sub_title", "favorites", "yellow", "blue", "red", "green", "play", "stop", "pause", "rewind", "forward", "prev", "next", "replay", "advance", "angle", "top_menu", "pop_up_menu", "eject", "karaoke", "netflix", "mode_3d", "zoom_in", "zoom_out", "browser_back", "browser_forward", "browser_bookmark_list", "list", "volume_up", "volume_down", "mute"] for method in methods: cmd_name = ''.join(x.capitalize() or '_' for x in method.split('_')) if method == "returns": cmd_name = "Return" elif method == "mode_3d": cmd_name = "Mode3D" getattr(device, method)() self.assertEqual(mock_send_command.call_count, 1) self.assertEqual(mock_send_command.mock_calls[0][1][0], cmd_name) mock_send_command.call_count = 0 mock_send_command.mock_calls.clear() @staticmethod def add_register_to_device(device, mode): register_action = XmlApiObject({}) register_action.mode = mode if mode < 4: register_action.url = REGISTRATION_URL_LEGACY else: register_action.url = 
REGISTRATION_URL_V4 device.actions["register"] = register_action def register_with_version(self, version, reg_url="", pin=1234): device = self.create_device() if version > 2: device.pin = pin self.add_register_to_device(device, version) if reg_url: device.actions["register"].url = reg_url result = device.register() return [result, device] def test_post_soap_request_invalid(self): device = self.create_device() params = "foobar" self.assertFalse(device._post_soap_request(SOAP_URL, params, params)) @mock.patch('requests.post', side_effect=mocked_requests_post) def test_post_soap_request(self, mocked_requests_post): params = "foobar" data = """<?xml version='1.0' encoding='utf-8'?> <SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <SOAP-ENV:Body> {0} </SOAP-ENV:Body> </SOAP-ENV:Envelope>""".format("foobar") device = self.create_device() self.assertTrue(device._post_soap_request(SOAP_URL, params, params)) mock_call = mocked_requests_post.mock_calls[0][2] headers = mock_call["headers"] self.assertEqual(headers['SOAPACTION'], '"{}"'.format(params)) self.assertEqual(headers['Content-Type'], "text/xml") self.assertEqual(mock_call["data"], data) self.assertEqual(mocked_requests_post.call_count, 1) @mock.patch('sonyapilib.device.SonyDevice.init_device', side_effect=mock_nothing) def test_get_action(self, mock_init_device): device = self.create_device() action = XmlApiObject({}) action.name = "test" with self.assertRaises(ValueError): device._get_action(action.name) self.assertEqual(mock_init_device.call_count, 1) device.actions[action.name] = action self.assertEqual(device._get_action(action.name), action) @mock.patch('sonyapilib.device.SonyDevice._send_req_ircc', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice.init_device', side_effect=mock_nothing) def test_send_command_error(self, mock_init_device, mock_send_req_ircc): device = self.create_device() with self.assertRaises(ValueError): device._send_command("test") self.create_command_list(device) with self.assertRaises(ValueError): device._send_command("foo") device._send_command("test") self.assertEqual(mock_send_req_ircc.call_count, 1) @mock.patch('sonyapilib.device.SonyDevice._post_soap_request', side_effect=mock_nothing) def test_send_req_ircc(self, mock_post_soap_request): device = self.create_device() params = "foobar" data = """<u:X_SendIRCC xmlns:u="urn:schemas-sony-com:service:IRCC:1"> <IRCCCode>{0}</IRCCCode> </u:X_SendIRCC>""".format(params) device._send_req_ircc(params) self.assertEqual(mock_post_soap_request.call_count, 1) self.assertEqual(mock_post_soap_request.call_args_list[0][1]['params'], data) def test_get_power_status_false(self): versions = [1, 2, 3, 4] device = self.create_device() for version in versions: device.api_version = version self.assertFalse(device.get_power_status()) @mock.patch('requests.post', side_effect=mock_request_error) def test_get_power_status_error(self, mocked_request_error): device = self.create_device() device.api_version = 4 self.assertFalse(device.get_power_status()) @mock.patch('requests.post', side_effect=mocked_requests_post) def test_get_power_status_error2(self, mocked_requests_post): device = self.create_device() device.api_version = 4 device.base_url = REQUESTS_ERROR device.actionlist_url = ACTION_LIST_URL self.assertFalse(device.get_power_status()) @mock.patch('requests.get', side_effect=mocked_requests_get) @mock.patch('requests.post', side_effect=mocked_requests_post) def 
test_get_power_status_true(self, mocked_post, mocked_get): versions = [1, 2, 3, 4] device = self.create_device() device.actionlist_url = ACTION_LIST_URL device.base_url = BASE_URL for version in versions: device.api_version = version self.assertTrue(device.get_power_status()) @mock.patch('sonyapilib.device.SonyDevice._send_command', side_effect=mock_nothing) def test_power_off(self, mock_send_command): device = self.create_device() device.power(False) self.assertEqual(mock_send_command.call_count, 1) self.assertEqual(mock_send_command.mock_calls[0][1][0], "Power") @mock.patch('sonyapilib.device.SonyDevice.get_power_status', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice._send_command', side_effect=mock_nothing) @mock.patch('sonyapilib.device.SonyDevice.wakeonlan', side_effect=mock_nothing) def test_power_on(self, mock_wake_on_lan, mock_send_command, mock_get_power_status): device = self.create_device() device.power(True) self.assertEqual(mock_send_command.call_count, 1) self.assertEqual(mock_wake_on_lan.call_count, 1) self.assertEqual(mock_send_command.mock_calls[0][1][0], "Power") @mock.patch('wakeonlan.send_magic_packet', side_effect=mock_nothing()) def test_wake_on_lan(self, mocked_wol): device = self.create_device() device.wakeonlan() self.assertEqual(mocked_wol.call_count, 0) device.mac = "foobar" device.wakeonlan() self.assertEqual(mocked_wol.call_count, 1) @mock.patch('requests.post', side_effect=mocked_requests_post) def test_playing_status_no_media_legacy(self, mocked_requests_post): device = self.create_device() self.assertEqual("OFF", device.get_playing_status()) device.av_transport_url = AV_TRANSPORT_URL_NO_MEDIA device.get_playing_status() device.av_transport_url = AV_TRANSPORT_URL self.assertEqual("PLAYING", device.get_playing_status()) def test_irrc_is_dmr(self): dev = SonyDevice(host="none", nickname="none", ircc_port=42, dmr_port=42) self.assertEqual(dev.dmr_url, dev.ircc_url) def test_parse_use_built_in_command_list_invalid_category(self): device = self.create_device() device._ircc_categories = ["MTIzNDU2"] device._use_builtin_command_list() self.assertEqual(0, len(device.commands)) def test_parse_use_built_in_command_list(self): device = self.create_device() device._ircc_categories = ["AAMAABxa"] device._use_builtin_command_list() commands = ["Confirm", "Up", "Down", "Right", "Left", "Home", "Options", "Return", "Num1", "Num2", "Num3", "Num4", "Num5", "Num6", "Num7", "Num8", "Num9", "Num0", "Power", "Display", "Audio", "SubTitle", "Favorites", "Yellow", "Blue", "Red", "Green", "Play", "Stop", "Pause", "Rewind", "Forward", "Prev", "Next", "Replay", "Advance", "Angle", "TopMenu", "PopUpMenu", "Eject", "Karaoke", "Netflix", "Mode3D"] for cmd in commands: self.assertTrue(cmd in device.commands) def test_handle_register_error_not_http(self): ex = Exception() device = self.create_device() res = device._handle_register_error(ex) self.assertEqual(res, AuthenticationResult.ERROR) @mock.patch('sonyapilib.device.SonyDevice._send_http', side_effect=mocked_return_none) def test_parse_system_info_v4_no_response(self, mocked_request): device = self.create_device() device._parse_system_information_v4() @staticmethod def create_command_list(device): command = XmlApiObject({}) command.name = "test" device.commands[command.name] = command @staticmethod
MIT License
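create_device is a plain test-fixture factory; the only non-obvious piece is that the cookie jar is rebuilt from a jsonpickle dump (data/cookies.json). The round trip below shows that mechanism with an in-memory dict instead of the repository file; it does not touch sonyapilib itself and the cookie values are made up.

import jsonpickle

cookies = {"auth": "abc123", "TLSSID": "xyz"}   # hypothetical cookie values
serialised = jsonpickle.encode(cookies)          # what data/cookies.json would hold
restored = jsonpickle.decode(serialised)         # what create_device assigns to device.cookies
assert restored == cookies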
opencivicdata/pupa
pupa/importers/base.py
BaseImporter.import_data
python
def import_data(self, data_items):
    record = {
        'insert': 0, 'update': 0, 'noop': 0,
        'start': utcnow(),
        'records': {
            'insert': [],
            'update': [],
            'noop': [],
        }
    }

    for json_id, data in self._prepare_imports(data_items):
        obj_id, what = self.import_item(data)
        self.json_to_db_id[json_id] = obj_id
        record['records'][what].append(obj_id)
        record[what] += 1

    self.postimport()

    record['end'] = utcnow()

    return {self._type: record}
import a bunch of dicts together
https://github.com/opencivicdata/pupa/blob/8087e221fc527a80262192d22c2f50966c20604d/pupa/importers/base.py#L220-L244
import os import copy import glob import json import logging from django.db.models import Q from django.db.models.signals import post_save from django.contrib.contenttypes.models import ContentType from opencivicdata.legislative.models import LegislativeSession from pupa import settings from pupa.exceptions import DuplicateItemError from pupa.utils import get_pseudo_id, utcnow from pupa.exceptions import UnresolvedIdError, DataImportError from pupa.models import Identifier def omnihash(obj): if isinstance(obj, set): return hash(frozenset(omnihash(e) for e in obj)) elif isinstance(obj, (tuple, list)): return hash(tuple(omnihash(e) for e in obj)) elif isinstance(obj, dict): return hash(frozenset((k, omnihash(v)) for k, v in obj.items())) else: return hash(obj) def items_differ(jsonitems, dbitems, subfield_dict): if len(jsonitems) == len(dbitems) == 0: return False elif len(jsonitems) != len(dbitems): return True original_jsonitems = jsonitems jsonitems = copy.deepcopy(jsonitems) keys = jsonitems[0].keys() for dbitem in dbitems: order = getattr(dbitem, 'order', None) match = None for i, jsonitem in enumerate(jsonitems): for k in keys: if k not in subfield_dict and getattr(dbitem, k) != jsonitem.get(k, None): break else: for k in subfield_dict: jsonsubitems = jsonitem[k] dbsubitems = list(getattr(dbitem, k).all()) if items_differ(jsonsubitems, dbsubitems, subfield_dict[k][2]): break else: if order is not None and int(order) != original_jsonitems.index(jsonitem): break match = i break if match is not None: jsonitems.pop(match) else: return True if jsonitems: return True return False class BaseImporter(object): _type = None model_class = None related_models = {} preserve_order = set() merge_related = {} cached_transformers = {} def __init__(self, jurisdiction_id): self.jurisdiction_id = jurisdiction_id self.json_to_db_id = {} self.duplicates = {} self.pseudo_id_cache = {} self.session_cache = {} self.logger = logging.getLogger("pupa") self.info = self.logger.info self.debug = self.logger.debug self.warning = self.logger.warning self.error = self.logger.error self.critical = self.logger.critical if settings.IMPORT_TRANSFORMERS.get(self._type): self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type] def get_session_id(self, identifier): if identifier not in self.session_cache: self.session_cache[identifier] = LegislativeSession.objects.get( identifier=identifier, jurisdiction_id=self.jurisdiction_id).id return self.session_cache[identifier] def prepare_for_db(self, data): return data def postimport(self): pass def resolve_json_id(self, json_id, allow_no_match=False): if not json_id: return None if json_id.startswith('~'): if json_id not in self.pseudo_id_cache: spec = get_pseudo_id(json_id) spec = self.limit_spec(spec) if isinstance(spec, Q): objects = self.model_class.objects.filter(spec) else: objects = self.model_class.objects.filter(**spec) ids = {each.id for each in objects} if len(ids) == 1: self.pseudo_id_cache[json_id] = ids.pop() errmsg = None elif not ids: errmsg = 'cannot resolve pseudo id to {}: {}'.format( self.model_class.__name__, json_id) else: errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format( self.model_class.__name__, json_id, ids) if errmsg: if not allow_no_match: raise UnresolvedIdError(errmsg) else: self.error(errmsg) self.pseudo_id_cache[json_id] = None return self.pseudo_id_cache[json_id] json_id = self.duplicates.get(json_id, json_id) try: return self.json_to_db_id[json_id] except KeyError: raise UnresolvedIdError('cannot resolve id: 
{}'.format(json_id)) def import_directory(self, datadir): def json_stream(): for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')): with open(fname) as f: yield json.load(f) return self.import_data(json_stream()) def _prepare_imports(self, dicts): seen_hashes = {} for data in dicts: json_id = data.pop('_id') objhash = omnihash(data) if objhash not in seen_hashes: seen_hashes[objhash] = json_id yield json_id, data else: self.duplicates[json_id] = seen_hashes[objhash]
BSD 3-Clause New or Revised License
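Stripped of the pupa-specific plumbing, import_data is an accumulator: every prepared item resolves to a database id plus one of three outcomes, and both are tallied into a report. The sketch below reproduces just that report shape with plain stdlib timestamps; the function and variable names are invented.

from datetime import datetime, timezone

def summarise(results):
    # `results` is an iterable of (obj_id, outcome) pairs, where outcome is
    # one of 'insert', 'update' or 'noop' -- the same buckets import_data uses.
    record = {
        'insert': 0, 'update': 0, 'noop': 0,
        'start': datetime.now(timezone.utc),
        'records': {'insert': [], 'update': [], 'noop': []},
    }
    for obj_id, what in results:
        record['records'][what].append(obj_id)
        record[what] += 1
    record['end'] = datetime.now(timezone.utc)
    return record

report = summarise([('a1', 'insert'), ('a2', 'noop'), ('a3', 'insert')])
print(report['insert'], report['noop'])          # -> 2 1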
botfront/rasa-for-botfront
rasa/shared/utils/validation.py
YamlValidationException.__init__
python
def __init__(
    self,
    message: Text,
    validation_errors: Optional[List[SchemaError.SchemaErrorEntry]] = None,
    filename: Optional[Text] = None,
    content: Any = None,
) -> None:
    super(YamlValidationException, self).__init__(filename)

    self.message = message
    self.validation_errors = validation_errors
    self.content = content
Create The Error.

Args:
    message: error message
    validation_errors: validation errors
    filename: name of the file which was validated
    content: yaml content loaded from the file (used for line information)
https://github.com/botfront/rasa-for-botfront/blob/6e0e48d0059e197b5f686df1e27935769c3641b7/rasa/shared/utils/validation.py#L34-L53
import logging import os from typing import Text, Dict, List, Optional, Any from packaging import version from packaging.version import LegacyVersion from pykwalify.errors import SchemaError from ruamel.yaml.constructor import DuplicateKeyError import rasa.shared from rasa.shared.exceptions import ( YamlException, YamlSyntaxException, SchemaValidationError, ) import rasa.shared.utils.io from rasa.shared.constants import ( DOCS_URL_TRAINING_DATA, PACKAGE_NAME, LATEST_TRAINING_DATA_FORMAT_VERSION, SCHEMA_EXTENSIONS_FILE, RESPONSES_SCHEMA_FILE, ) logger = logging.getLogger(__name__) KEY_TRAINING_DATA_FORMAT_VERSION = "version" class YamlValidationException(YamlException, ValueError):
Apache License 2.0
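The constructor above only stores its arguments; the point is that the raw validation errors and the parsed YAML travel with the exception so callers can build detailed reports (e.g. with line numbers) later. The snippet below is a generic, self-contained sketch of the same pattern with invented names, not Rasa's actual class.

from typing import Any, List, Optional

class ValidationFailure(ValueError):
    # Keep the message, the raw validation errors and the parsed content so
    # the caller can format a precise report after catching the exception.
    def __init__(self, message: str, errors: Optional[List[str]] = None,
                 filename: Optional[str] = None, content: Any = None) -> None:
        super().__init__(filename)
        self.message = message
        self.errors = errors or []
        self.content = content

try:
    raise ValidationFailure("missing `version` key", errors=["version: required"],
                            filename="domain.yml", content={"intents": []})
except ValidationFailure as exc:
    print(exc.message, exc.errors)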
containers/podman-py
podman/domain/pods_manager.py
PodsManager.prune
python
def prune(self, filters: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
    response = self.client.post("/pods/prune", params={"filters": api.prepare_filters(filters)})
    response.raise_for_status()

    deleted: List[str] = list()
    for item in response.json():
        if item["Err"] is not None:
            raise APIError(
                item["Err"],
                response=response,
                explanation=f"""Failed to prune network '{item["Id"]}'""",
            )
        deleted.append(item["Id"])
    return {"PodsDeleted": deleted, "SpaceReclaimed": 0}
Delete unused Pods.

Returns:
    Dictionary Keys:
        - PodsDeleted (List[str]): List of pod ids deleted.
        - SpaceReclaimed (int): Always zero.

Raises:
    APIError: when service reports error
https://github.com/containers/podman-py/blob/7cff4162c6cbe3161d9a36bc645e1f11972bf2a9/podman/domain/pods_manager.py#L86-L109
import json import logging from typing import Any, Dict, List, Optional, Union from podman import api from podman.domain.manager import Manager from podman.domain.pods import Pod from podman.errors import APIError logger = logging.getLogger("podman.pods") class PodsManager(Manager): @property def resource(self): return Pod def create(self, name: str, **kwargs) -> Pod: data = dict() if kwargs is None else kwargs.copy() data["name"] = name response = self.client.post("/pods/create", data=json.dumps(data)) response.raise_for_status() body = response.json() return self.get(body["Id"]) def exists(self, key: str) -> bool: response = self.client.get(f"/pods/{key}/exists") return response.ok def get(self, pod_id: str) -> Pod: response = self.client.get(f"/pods/{pod_id}/json") response.raise_for_status() return self.prepare_model(attrs=response.json()) def list(self, **kwargs) -> List[Pod]: params = {"filters": api.prepare_filters(kwargs.get("filters"))} response = self.client.get("/pods/json", params=params) response.raise_for_status() return [self.prepare_model(attrs=i) for i in response.json()]
Apache License 2.0
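A short usage sketch for the method above. The socket URL is only an example, and treating client.pods as the PodsManager instance is an assumption based on podman-py's client layout.

from podman import PodmanClient

# Example socket path; adjust to your Podman service. `client.pods` is assumed
# to be the PodsManager whose prune() is shown above.
with PodmanClient(base_url="unix:///run/podman/podman.sock") as client:
    report = client.pods.prune()
    print(report["PodsDeleted"])      # list of removed pod ids
    print(report["SpaceReclaimed"])   # always 0, per the docstring above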
muges/audiotsm
audiotsm/base/tsm.py
TSM.write_to
python
def write_to(self, writer):
    raise NotImplementedError
Writes as many result samples as possible to ``writer``.

:param writer: a :class:`audiotsm.io.base.Writer`.
:returns: a tuple (``n``, ``finished``), with:

    - ``n`` the number of samples that were written to ``writer``
    - ``finished`` a boolean that is ``True`` when there are no samples
      remaining to write. In this case, the
      :func:`~audiotsm.base.tsm.TSM.read_from` method should be called to add
      new input samples, or, if there are no remaining input samples, the
      :func:`~audiotsm.base.tsm.TSM.flush_to` method should be called to get
      the last output samples.

:rtype: (int, bool)
https://github.com/muges/audiotsm/blob/cf3875842bda44d81930c44b008937e72109ae9f/audiotsm/base/tsm.py#L95-L110
class TSM(object):
    def clear(self):
        raise NotImplementedError

    def flush_to(self, writer):
        raise NotImplementedError

    def get_max_output_length(self, input_length):
        raise NotImplementedError

    def read_from(self, reader):
        raise NotImplementedError

    def run(self, reader, writer, flush=True):
        finished = False
        while not (finished and reader.empty):
            self.read_from(reader)
            _, finished = self.write_to(writer)

        if flush:
            finished = False
            while not finished:
                _, finished = self.flush_to(writer)

            self.clear()

    def set_speed(self, speed):
        raise NotImplementedError
MIT License
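write_to is normally driven by run(), which alternates read_from and write_to exactly as shown in the context above, so it is rarely called directly. The sketch below follows audiotsm's documented WAV helpers; the file names and the speed value are placeholders.

from audiotsm import wsola
from audiotsm.io.wav import WavReader, WavWriter

# Placeholder file names; "in.wav" must exist for this to run.
with WavReader("in.wav") as reader:
    with WavWriter("out.wav", reader.channels, reader.samplerate) as writer:
        tsm = wsola(reader.channels, speed=1.5)
        tsm.run(reader, writer)   # internally loops read_from()/write_to(), then flushes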
opennetworkingfoundation/tapi
RI/flask_server/tapi_server/models/tapi_connectivity_connectivity_constraint.py
TapiConnectivityConnectivityConstraint.service_layer
python
def service_layer(self):
    return self._service_layer
Gets the service_layer of this TapiConnectivityConnectivityConstraint.

:return: The service_layer of this TapiConnectivityConnectivityConstraint.
:rtype: TapiCommonLayerProtocolName
https://github.com/opennetworkingfoundation/tapi/blob/1f3fd9483d5674552c5a31206c97399c8c151897/RI/flask_server/tapi_server/models/tapi_connectivity_connectivity_constraint.py#L87-L94
from __future__ import absolute_import from datetime import date, datetime from typing import List, Dict from tapi_server.models.base_model_ import Model from tapi_server.models.tapi_common_capacity import TapiCommonCapacity from tapi_server.models.tapi_common_forwarding_direction import TapiCommonForwardingDirection from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName from tapi_server.models.tapi_common_time_range import TapiCommonTimeRange from tapi_server.models.tapi_connectivity_connectivity_service_ref import TapiConnectivityConnectivityServiceRef from tapi_server.models.tapi_connectivity_service_type import TapiConnectivityServiceType from tapi_server import util class TapiConnectivityConnectivityConstraint(Model): def __init__(self, service_layer=None, schedule=None, connectivity_direction=None, requested_capacity=None, diversity_exclusion=None, service_level=None, service_type=None, coroute_inclusion=None): self.openapi_types = { 'service_layer': TapiCommonLayerProtocolName, 'schedule': TapiCommonTimeRange, 'connectivity_direction': TapiCommonForwardingDirection, 'requested_capacity': TapiCommonCapacity, 'diversity_exclusion': List[TapiConnectivityConnectivityServiceRef], 'service_level': str, 'service_type': TapiConnectivityServiceType, 'coroute_inclusion': TapiConnectivityConnectivityServiceRef } self.attribute_map = { 'service_layer': 'service-layer', 'schedule': 'schedule', 'connectivity_direction': 'connectivity-direction', 'requested_capacity': 'requested-capacity', 'diversity_exclusion': 'diversity-exclusion', 'service_level': 'service-level', 'service_type': 'service-type', 'coroute_inclusion': 'coroute-inclusion' } self._service_layer = service_layer self._schedule = schedule self._connectivity_direction = connectivity_direction self._requested_capacity = requested_capacity self._diversity_exclusion = diversity_exclusion self._service_level = service_level self._service_type = service_type self._coroute_inclusion = coroute_inclusion @classmethod def from_dict(cls, dikt) -> 'TapiConnectivityConnectivityConstraint': return util.deserialize_model(dikt, cls) @property
Apache License 2.0
mikeshardmind/sinbadcogs
channelredirect/redirect.py
ChannelRedirect.rset_add_chan
python
async def rset_add_chan(self, ctx, *channels: discord.TextChannel):
    if not channels:
        return await ctx.send_help()

    gsets = await self.config.guild(ctx.guild).all()
    mode = gsets["mode"]
    if not mode:
        return await ctx.send(
            "You need to set a mode using `{ctx.prefix}redirectset mode` first".format(
                ctx=ctx
            )
        )

    for channel in channels:
        if channel.id not in gsets[mode]:
            gsets[mode].append(channel.id)

    await self.config.guild(ctx.guild).set_raw(mode, value=gsets[mode])
    await ctx.tick()
Adds one or more channels to the current mode's settings.
https://github.com/mikeshardmind/sinbadcogs/blob/e9353fb63f18f5c2025e177f89b028aa7ac7a63d/channelredirect/redirect.py#L185-L207
from __future__ import annotations import asyncio import contextlib from typing import Set import discord from redbot.core import checks, commands from redbot.core.config import Config from .converters import CogOrCOmmand, CommandConverter, TrinaryBool class ChannelRedirect(commands.Cog): __version__ = "2021.03" async def red_delete_data_for_user(self, **kwargs): return def format_help_for_context(self, ctx): pre_processed = super().format_help_for_context(ctx) return f"{pre_processed}\nCog Version: {self.__version__}" def __init__(self, bot, *args, **kwargs): super().__init__(*args, **kwargs) self.bot = bot self.config = Config.get_conf( self, identifier=78631113035100160 + 1, force_registration=True ) self.config.register_guild( mode=None, blocklist=[], allowlist=[], command={}, cog={}, immunities={}, com_allowlist={"cog": {}, "command": {}}, ) def cog_unload(self): self.bot.remove_before_invoke_hook(self.before_invoke_hook) @staticmethod def should_early_exit(conf: dict, com: commands.Command): if conf["mode"] is None: return True with contextlib.suppress(KeyError): if conf["com_allowlist"]["command"][com.qualified_name]: return True with contextlib.suppress(KeyError): if conf["com_allowlist"]["cog"][com.cog_name]: return True async def get_allowed_channels( self, ctx: commands.Context, *, ignore_overides: bool = False, com: commands.Command = None, ) -> Set[discord.TextChannel]: guild = ctx.guild assert guild is not None com = com or ctx.command gset = await self.config.guild(guild).all() channels = guild.text_channels allowed_ids: Set[int] = set() if com and self.should_early_exit(gset, com): return set(channels) if gset["mode"] == "allowlist": allowed_ids = {int(idx) for idx in gset["allowlist"]} elif gset["mode"] == "blocklist": disallowed_ids = {int(idx) for idx in gset["blocklist"]} allowed_ids = {c.id for c in channels} - disallowed_ids if not ignore_overides: com_extras = gset["command"].get(com.qualified_name, {}) cog_extras = gset["cog"].get(com.cog_name, {}) for rule_set in (cog_extras, com_extras): for channel_id, allowed in rule_set.items(): if allowed: allowed_ids.add(int(channel_id)) elif allowed is False: allowed_ids.discard(int(channel_id)) return {channel for channel in channels if channel.id in allowed_ids} async def is_redirect_immune(self, ctx): if ( ctx.guild is None or ctx.guild.owner == ctx.author or await ctx.bot.is_owner(ctx.author) or await ctx.bot.is_admin(ctx.author) ): return True imset = await self.config.guild(ctx.guild).immunities.all() vals = [v for k, v in imset.items() if k in (str(ctx.channel.id), "global")] immune_ids = set() for val in vals: immune_ids.update({int(v) for v in val}) if immune_ids & {r.id for r in ctx.author.roles}: return True async def before_invoke_hook(self, ctx: commands.Context): if await self.is_redirect_immune(ctx): return True allowed_chans = await self.get_allowed_channels(ctx) if ctx.channel not in allowed_chans and not isinstance( ctx.command, commands.commands._AlwaysAvailableCommand ): chan_mentions = ", ".join(c.mention for c in allowed_chans) await ctx.send( f"{ctx.author.mention} This command is only available in {chan_mentions}", delete_after=30, ) raise commands.CheckFailure() else: return True @commands.guild_only() @checks.admin_or_permissions(manage_guild=True) @commands.group(name="redirectset") async def rset(self, ctx): pass @rset.command(name="showsettings") async def rest_show_settings(self, ctx, command: CommandConverter = None): com_obj = command.com if command is not None else None channels = await 
self.get_allowed_channels( ctx, com=com_obj, ignore_overides=(command is None) ) msg = "Usable channels:\n" + ", ".join(c.mention for c in channels) await ctx.send(msg) @rset.command(name="mode") async def rset_set_mode(self, ctx, *, mode: str = ""): mode = mode.casefold() if mode not in ("allowlist", "blocklist"): return await ctx.send_help() await self.config.guild(ctx.guild).mode.set(mode) await ctx.tick() @rset.command(name="addchan")
Apache License 2.0
giampaolo/pyftpdlib
pyftpdlib/authorizers.py
DummyAuthorizer.add_user
python
def add_user(self, username, password, homedir, perm='elr',
             msg_login="Login successful.", msg_quit="Goodbye."):
    if self.has_user(username):
        raise ValueError('user %r already exists' % username)
    if not isinstance(homedir, unicode):
        homedir = homedir.decode('utf8')
    if not os.path.isdir(homedir):
        raise ValueError('no such directory: %r' % homedir)
    homedir = os.path.realpath(homedir)
    self._check_permissions(username, perm)
    dic = {'pwd': str(password),
           'home': homedir,
           'perm': perm,
           'operms': {},
           'msg_login': str(msg_login),
           'msg_quit': str(msg_quit)
           }
    self.user_table[username] = dic
Add a user to the virtual users table.

AuthorizerError exceptions are raised on error conditions such as invalid
permissions, missing home directory or duplicate usernames.

Optional perm argument is a string referencing the user's permissions
explained below:

Read permissions:
    - "e" = change directory (CWD command)
    - "l" = list files (LIST, NLST, STAT, MLSD, MLST, SIZE, MDTM commands)
    - "r" = retrieve file from the server (RETR command)

Write permissions:
    - "a" = append data to an existing file (APPE command)
    - "d" = delete file or directory (DELE, RMD commands)
    - "f" = rename file or directory (RNFR, RNTO commands)
    - "m" = create directory (MKD command)
    - "w" = store a file to the server (STOR, STOU commands)
    - "M" = change file mode (SITE CHMOD command)
    - "T" = update file last modified time (MFMT command)

Optional msg_login and msg_quit arguments can be specified to provide
customized response strings when the user logs in and quits.
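A short usage sketch of add_user with the permission flags listed above; the account names, passwords, and home directories are placeholders (the directories must exist on disk):

from pyftpdlib.authorizers import DummyAuthorizer

authorizer = DummyAuthorizer()
# Read-only account: change directory, list, and retrieve files.
authorizer.add_user("reader", "s3cret", "/srv/ftp/pub", perm="elr")
# Read-write account: adds append, delete, rename, mkdir, and store.
authorizer.add_user("writer", "s3cret", "/srv/ftp/incoming", perm="elradfmw")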
https://github.com/giampaolo/pyftpdlib/blob/5793ee5f61029d232f940a69a92bf67996be7f00/pyftpdlib/authorizers.py#L75-L117
import errno import os import sys import warnings from ._compat import PY3 from ._compat import unicode from ._compat import getcwdu __all__ = ['DummyAuthorizer', ] class AuthorizerError(Exception): class AuthenticationFailed(Exception): class DummyAuthorizer(object): read_perms = "elr" write_perms = "adfmwMT" def __init__(self): self.user_table = {}
MIT License
pypa/pipenv
pipenv/patched/notpip/_vendor/urllib3/connectionpool.py
HTTPSConnectionPool._new_conn
python
def _new_conn(self):
    self.num_connections += 1

    log.debug(
        "Starting new HTTPS connection (%d): %s:%s",
        self.num_connections,
        self.host,
        self.port or "443",
    )

    if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
        raise SSLError(
            "Can't connect to HTTPS URL because the SSL module is not available."
        )

    actual_host = self.host
    actual_port = self.port
    if self.proxy is not None:
        actual_host = self.proxy.host
        actual_port = self.proxy.port

    conn = self.ConnectionCls(
        host=actual_host,
        port=actual_port,
        timeout=self.timeout.connect_timeout,
        strict=self.strict,
        cert_file=self.cert_file,
        key_file=self.key_file,
        key_password=self.key_password,
        **self.conn_kw
    )

    return self._prepare_conn(conn)
Return a fresh :class:`httplib.HTTPSConnection`.
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/patched/notpip/_vendor/urllib3/connectionpool.py#L950-L984
from __future__ import absolute_import import errno import logging import sys import warnings from socket import error as SocketError, timeout as SocketTimeout import socket from .exceptions import ( ClosedPoolError, ProtocolError, EmptyPoolError, HeaderParsingError, HostChangedError, LocationValueError, MaxRetryError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, InsecureRequestWarning, NewConnectionError, ) from .packages.ssl_match_hostname import CertificateError from .packages import six from .packages.six.moves import queue from .connection import ( port_by_scheme, DummyConnection, HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, HTTPException, BaseSSLError, ) from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped from .util.request import set_file_position from .util.response import assert_header_parsing from .util.retry import Retry from .util.timeout import Timeout from .util.url import ( get_host, parse_url, Url, _normalize_host as normalize_host, _encode_target, ) from .util.queue import LifoQueue xrange = six.moves.xrange log = logging.getLogger(__name__) _Default = object() class ConnectionPool(object): scheme = None QueueCls = LifoQueue def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self._proxy_host = host.lower() self.port = port def __str__(self): return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False def close(self): pass _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): scheme = "http" ConnectionCls = HTTPConnection ResponseCls = HTTPResponse def __init__( self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, **conn_kw ): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict if not isinstance(timeout, Timeout): timeout = Timeout.from_float(timeout) if retries is None: retries = Retry.DEFAULT self.timeout = timeout self.retries = retries self.pool = self.QueueCls(maxsize) self.block = block self.proxy = _proxy self.proxy_headers = _proxy_headers or {} for _ in xrange(maxsize): self.pool.put(None) self.num_connections = 0 self.num_requests = 0 self.conn_kw = conn_kw if self.proxy: self.conn_kw.setdefault("socket_options", []) def _new_conn(self): self.num_connections += 1 log.debug( "Starting new HTTP connection (%d): %s:%s", self.num_connections, self.host, self.port or "80", ) conn = self.ConnectionCls( host=self.host, port=self.port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw ) return conn def _get_conn(self, timeout=None): conn = None try: conn = self.pool.get(block=self.block, timeout=timeout) except AttributeError: raise ClosedPoolError(self, "Pool is closed.") except queue.Empty: if self.block: raise EmptyPoolError( self, "Pool reached maximum size and no more connections are allowed.", ) pass if conn and is_connection_dropped(conn): log.debug("Resetting dropped connection: %s", self.host) conn.close() if getattr(conn, "auto_open", 1) == 0: conn = None return conn or self._new_conn() def _put_conn(self, conn): try: self.pool.put(conn, block=False) return except AttributeError: pass except queue.Full: 
log.warning("Connection pool is full, discarding connection: %s", self.host) if conn: conn.close() def _validate_conn(self, conn): pass def _prepare_proxy(self, conn): pass def _get_timeout(self, timeout): if timeout is _Default: return self.timeout.clone() if isinstance(timeout, Timeout): return timeout.clone() else: return Timeout.from_float(timeout) def _raise_timeout(self, err, url, timeout_value): if isinstance(err, SocketTimeout): raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) if hasattr(err, "errno") and err.errno in _blocking_errnos: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) if "timed out" in str(err) or "did not complete (read)" in str( err ): raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) def _make_request( self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw ): self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout try: self._validate_conn(conn) except (SocketTimeout, BaseSSLError) as e: self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) raise if chunked: conn.request_chunked(method, url, **httplib_request_kw) else: conn.request(method, url, **httplib_request_kw) read_timeout = timeout_obj.read_timeout if getattr(conn, "sock", None): if read_timeout == 0: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % read_timeout ) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: conn.sock.settimeout(read_timeout) try: try: httplib_response = conn.getresponse(buffering=True) except TypeError: try: httplib_response = conn.getresponse() except BaseException as e: six.raise_from(e, None) except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise http_version = getattr(conn, "_http_vsn_str", "HTTP/?") log.debug( '%s://%s:%s "%s %s %s" %s %s', self.scheme, self.host, self.port, method, url, http_version, httplib_response.status, httplib_response.length, ) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: log.warning( "Failed to parse headers (url=%s): %s", self._absolute_url(url), hpe, exc_info=True, ) return httplib_response def _absolute_url(self, path): return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url def close(self): if self.pool is None: return old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except queue.Empty: pass def is_same_host(self, url): if url.startswith("/"): return True scheme, host, port = get_host(url) if host is not None: host = _normalize_host(host, scheme=scheme) if self.port and not port: port = port_by_scheme.get(scheme) elif not self.port and port == port_by_scheme.get(scheme): port = None return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) if assert_same_host and not self.is_same_host(url): raise 
HostChangedError(self, url, retries) if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parse_url(url).url) conn = None release_this_conn = release_conn if self.scheme == "http": headers = headers.copy() headers.update(self.proxy_headers) err = None clean_exit = False body_pos = set_file_position(body, body_pos) try: timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn: self._prepare_proxy(conn) httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) response_conn = conn if not release_conn else None response_kw["request_method"] = method response = self.ResponseCls.from_httplib( httplib_response, pool=self, connection=response_conn, retries=retries, **response_kw ) clean_exit = True except queue.Empty: raise EmptyPoolError(self, "No pool connections are available.") except ( TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, CertificateError, ) as e: clean_exit = False if isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError("Cannot connect to proxy.", e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError("Connection aborted.", e) retries = retries.increment( method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] ) retries.sleep() err = e finally: if not clean_exit: conn = conn and conn.close() release_this_conn = True if release_this_conn: self._put_conn(conn) if not conn: log.warning( "Retrying (%r) after connection broken by '%r': %s", retries, err, url ) return self.urlopen( method, url, body, headers, retries, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, chunked=chunked, body_pos=body_pos, **response_kw ) def drain_and_release_conn(response): try: response.read() except ( TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, ): pass redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: method = "GET" try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_redirect: drain_and_release_conn(response) raise return response drain_and_release_conn(response) retries.sleep_for_retry(response) log.debug("Redirecting %s -> %s", url, redirect_location) return self.urlopen( method, redirect_location, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, chunked=chunked, body_pos=body_pos, **response_kw ) has_retry_after = bool(response.getheader("Retry-After")) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_status: drain_and_release_conn(response) raise return response drain_and_release_conn(response) retries.sleep(response) log.debug("Retry: %s", url) return self.urlopen( method, url, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, chunked=chunked, body_pos=body_pos, **response_kw ) return response class 
HTTPSConnectionPool(HTTPConnectionPool): scheme = "https" ConnectionCls = HTTPSConnection def __init__( self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, key_password=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, **conn_kw ): HTTPConnectionPool.__init__( self, host, port, strict, timeout, maxsize, block, headers, retries, _proxy, _proxy_headers, **conn_kw ) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.key_password = key_password self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def _prepare_conn(self, conn): if isinstance(conn, VerifiedHTTPSConnection): conn.set_cert( key_file=self.key_file, key_password=self.key_password, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) conn.ssl_version = self.ssl_version return conn def _prepare_proxy(self, conn): conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) conn.connect()
MIT License
100/solid
Solid/HarmonySearch.py
HarmonySearch._score
python
def _score(self, harmony): pass
Returns score of a harmony :param harmony: a harmony :return: score of harmony
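Because _score is abstract, a concrete subclass must provide it alongside _random_harmony. A toy sketch, assuming (as the max_score parameter suggests) that higher scores are treated as better:

from random import uniform

class SphereHarmonySearch(HarmonySearch):
    # Toy subclass for illustration: maximize the negative sphere function
    # over three real-valued variables.
    def _random_harmony(self):
        return [uniform(-5.0, 5.0) for _ in range(3)]

    def _score(self, harmony):
        return -sum(x * x for x in harmony)

# Construction only; running the search uses the library's run method,
# which is not part of the excerpt shown here.
search = SphereHarmonySearch(hms=30, hmcr=0.7, par=0.3, fw=0.5, max_steps=1000)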
https://github.com/100/solid/blob/f38ca4906b7a253bfbb74f271229625d0f1df175/Solid/HarmonySearch.py#L97-L104
from abc import ABCMeta, abstractmethod from random import choice, random, uniform from numpy import argmax, argmin class HarmonySearch: __metaclass__ = ABCMeta cur_steps = None hms = None hmcr = None par = None fw = None memory = None scores = None best = None max_steps = None max_score = None def __init__(self, hms, hmcr, par, fw, max_steps, max_score=None): if isinstance(hms, int) and hms > 0: self.hms = hms else: raise TypeError('Harmony memory size must be a positive integer') if isinstance(hmcr, float) and 0 <= hmcr <= 1: self.hmcr = hmcr else: raise TypeError('Harmony memory considering rate must be a float between 0 and 1') if isinstance(par, float) and 0 <= par <= 1: self.par = par else: raise TypeError('Pitch adjustment rate must be a float between 0 and 1') if isinstance(fw, (int, float)): self.fw = float(fw) else: raise TypeError('Fret width must be a numeric type') if isinstance(max_steps, int) and max_steps > 0: self.max_steps = max_steps else: raise TypeError('Max steps must be a positive integer') if max_score is not None: if isinstance(max_score, (int, float)): self.max_score = max_score else: raise TypeError('Max score must be a numeric type') def __str__(self): return ('HARMONY SEARCH: \n' + 'CURRENT STEPS: %d \n' + 'BEST SCORE: %f \n' + 'BEST MEMBER: %s \n\n') % (self.cur_steps, self._score(self.best), str(self.best)) def __repr__(self): return self.__str__() def _clear(self): self.cur_steps = 0 self.memory = list([self._random_harmony() for _ in range(self.hms)]) self.scores = None @abstractmethod def _random_harmony(self): pass @abstractmethod
MIT License
google/aiyprojects-raspbian
src/aiy/leds.py
Leds.rgb
python
def rgb(state, rgb): return {i + 1 : Leds.Channel(state, rgb[i]) for i in range(3)}
Creates a configuration for the RGB channels: 1 (red), 2 (green), 3 (blue).

Generally, you should instead use convenience constructors such as
:func:`rgb_on` and :func:`rgb_pattern`.

Args:
    state: Either :attr:`Channel.ON`, :attr:`Channel.OFF`, or
        :attr:`Channel.PATTERN`.
    rgb: Either one of the :class:`Color` constants or your own tuple of
        RGB values.

Returns:
    A dictionary of 3 :class:`Channel` objects, representing red, green,
    and blue values.
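A brief usage sketch; it assumes the Leds.update() method and the context-manager support of the full aiy.leds module, which are not part of the excerpt below:

from aiy.leds import Leds, Color

# Leds.update() and the "with" support come from the full module (assumption here).
with Leds() as leds:
    # Solid red via the raw rgb() helper, equivalent to the rgb_on(Color.RED)
    # convenience constructor mentioned in the docstring.
    leds.update(Leds.rgb(Leds.Channel.ON, Color.RED))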
https://github.com/google/aiyprojects-raspbian/blob/964f07f5b4bd2ec785cfda6f318e50e1b67d4758/src/aiy/leds.py#L197-L212
import math import os _DEVICE_PATH = '/sys/class/leds/ktd202x:led1/device/' def _tflash_reg(duration_ms): if duration_ms <= 128: return 0 if duration_ms <= 384: return 1 return min((int(round(duration_ms / 128))) - 2, 126) def _pwm1_reg(percent): return int(round(256.0 * percent)) def _trise_tfall_reg(duration_ms): if duration_ms <= 1.5: return 0 return min(int(round(duration_ms / 96)), 15) def _write(path, data): with open(path, 'w') as f: f.write(str(data)) def _device_file(prop): return os.path.join(_DEVICE_PATH, prop) class Color: @staticmethod def blend(color_a, color_b, alpha): return tuple([math.ceil(alpha * color_a[i] + (1.0 - alpha) * color_b[i]) for i in range(3)]) BLACK = (0x00, 0x00, 0x00) RED = (0xFF, 0x00, 0x00) GREEN = (0x00, 0xFF, 0x00) YELLOW = (0xFF, 0xFF, 0x00) BLUE = (0x00, 0x00, 0xFF) PURPLE = (0xFF, 0x00, 0xFF) CYAN = (0x00, 0xFF, 0xFF) WHITE = (0xFF, 0xFF, 0xFF) class Pattern: def __init__(self, period_ms, on_percent=0.5, rise_ms=0, fall_ms=0): if on_percent < 0 or on_percent > 0.996: raise ValueError('on_percent must be in the range [0..0.996]') if period_ms < 0 or rise_ms < 0 or fall_ms < 0: raise ValueError('durations must be non-negative') self.period_ms = period_ms self.on_percent = on_percent self.rise_ms = rise_ms self.fall_ms = fall_ms @staticmethod def blink(period_ms): return Pattern(period_ms, 0.5) @staticmethod def breathe(period_ms): return Pattern(period_ms, 0.3, period_ms * 0.3, period_ms * 0.3) class Leds: class Channel: OFF = 0 ON = 1 PATTERN = 2 def __init__(self, state, brightness): if state not in (self.ON, self.OFF, self.PATTERN): raise ValueError('state must be OFF, ON, or PATTERN') if brightness < 0 or brightness > 255: raise ValueError('brightness must be in the range [0..255]') self.state = state self.brightness = brightness @staticmethod
Apache License 2.0
googleapis/synthtool
autosynth/multi.py
_list_issues_cached
python
def _list_issues_cached(gh, *args, **kwargs): return list(gh.list_issues(*args, **kwargs))
A caching wrapper for listing issues, so we don't expend our quota.
https://github.com/googleapis/synthtool/blob/d4ff3cd9a9b2567cc00ab67290eeb89992b20318/autosynth/multi.py#L134-L136
import argparse import functools import importlib import os import pathlib import subprocess import sys import typing from typing import Any, List import requests import yaml from synthtool.report import make_report from autosynth import executor, github, synth from autosynth.log import logger Runner = typing.Callable[ [typing.List[str], typing.Any, pathlib.Path], typing.Tuple[int, bytes] ] def _execute( command: typing.List[str], env: typing.Any, log_file_path: pathlib.Path ) -> typing.Tuple[int, bytes]: log_file_path.parent.mkdir(parents=True, exist_ok=True) with open(log_file_path, "wb+") as log_file: result = executor.run( command=command, stdout=log_file, stderr=subprocess.STDOUT, check=False, encoding="utf-8", env=env, ) with open(log_file_path, "rb") as fp: return (result.returncode, fp.read()) def synthesize_library( library: typing.Dict, github_token: str, extra_args: typing.List[str], base_log_path: pathlib.Path, runner: Runner = _execute, ) -> typing.Dict: logger.info(f"Synthesizing {library['name']}.") command = [sys.executable, "-m", "autosynth.synth"] env = os.environ env["GITHUB_TOKEN"] = github_token library_args = [ "--repository", library["repository"], "--synth-path", library.get("synth-path", ""), "--branch-suffix", library.get("branch-suffix", ""), "--pr-title", library.get("pr-title", ""), "--base-log-dir", str(base_log_path), ] if library.get("metadata-path"): library_args.extend(["--metadata-path", library.get("metadata-path")]) if library.get("deprecated-execution", False): library_args.append("--deprecated-execution") log_file_dir = ( pathlib.Path(base_log_path) / pathlib.Path(library.get("synth-path", "") or library["repository"]).name ) log_file_path = log_file_dir / "sponge_log.log" (returncode, output) = runner( command + library_args + library.get("args", []) + extra_args, env, log_file_path, ) error = returncode not in (0, synth.EXIT_CODE_SKIPPED) skipped = returncode == synth.EXIT_CODE_SKIPPED results = [ { "name": library["name"], "error": error, "output": "See the test log.", "skipped": skipped, } ] make_report(library["name"], results, log_file_dir) if error: logger.error(f"Synthesis failed for {library['name']}") return { "name": library["name"], "output": output.decode("utf-8", errors="ignore"), "error": error, "skipped": skipped, } @functools.lru_cache()
Apache License 2.0
tmcknight/movie-and-tv-show-search-alfred-workflow
mako/runtime.py
Context.lookup
python
def lookup(self): return self._with_template.lookup
Return the :class:`.TemplateLookup` associated with this :class:`.Context`.
https://github.com/tmcknight/movie-and-tv-show-search-alfred-workflow/blob/243959cd26f2abc194bbc7f9231faf4f1ab28e31/mako/runtime.py#L50-L55
from mako import exceptions, util, compat from mako.compat import compat_builtins import sys class Context(object): def __init__(self, buffer, **data): self._buffer_stack = [buffer] self._data = data self._kwargs = data.copy() self._with_template = None self._outputting_as_unicode = None self.namespaces = {} self._data['capture'] = compat.partial(capture, self) self.caller_stack = self._data['caller'] = CallerStack() def _set_with_template(self, t): self._with_template = t illegal_names = t.reserved_names.intersection(self._data) if illegal_names: raise exceptions.NameConflictError( "Reserved words passed to render(): %s" % ", ".join(illegal_names)) @property
MIT License
yoseflab/cassiopeia
cassiopeia/preprocess/utilities.py
convert_alleletable_to_lineage_profile
python
def convert_alleletable_to_lineage_profile(
    allele_table,
    cut_sites: Optional[List[str]] = None,
    collapse_duplicates: bool = True,
) -> pd.DataFrame:
    if cut_sites is None:
        cut_sites = get_default_cut_site_columns(allele_table)

    agg_recipe = dict(
        zip([cutsite for cutsite in cut_sites], [list] * len(cut_sites))
    )
    g = allele_table.groupby(["cellBC", "intBC"]).agg(agg_recipe)
    intbcs = allele_table["intBC"].unique()

    i1 = []
    for i in intbcs:
        i1 += [i] * len(cut_sites)
    i2 = list(cut_sites) * len(intbcs)
    indices = [i1, i2]

    allele_piv = pd.DataFrame(index=g.index.levels[0], columns=indices)
    for j in tqdm(g.index, desc="filling in multiindex table"):
        for val, cutsite in zip(g.loc[j], cut_sites):
            if collapse_duplicates:
                val = sorted(set(val))
            val = tuple(val)
            if len(val) == 1:
                val = val[0]
            allele_piv.loc[j[0]][j[1], cutsite] = val

    allele_piv2 = pd.pivot_table(
        allele_table,
        index=["cellBC"],
        columns=["intBC"],
        values="UMI",
        aggfunc=pylab.size,
    )
    col_order = (
        allele_piv2.dropna(axis=1, how="all")
        .sum()
        .sort_values(ascending=False, inplace=False)
        .index
    )

    lineage_profile = allele_piv[col_order]

    lineage_profile.columns = [
        "_".join(tup).rstrip("_") for tup in lineage_profile.columns.values
    ]

    return lineage_profile
Converts an AlleleTable to a lineage profile.

Takes in an allele table that summarizes the indels observed at individual
cellBC-intBC pairs and produces a lineage profile, which essentially is a
pivot table over the cellBC / intBCs. Conceptually, these lineage profiles
are identical to character matrices, only the values in the matrix are the
actual indel identities.

Args:
    allele_table: AlleleTable.
    cut_sites: Columns in the AlleleTable to treat as cut sites. If None,
        we assume that the cut-sites are denoted by columns of the form
        "r{int}" (e.g. "r1").
    collapse_duplicates: Whether or not to collapse duplicate character
        states present for a single cellBC-intBC pair. This option has no
        effect if there are no allele conflicts. Defaults to True.

Returns:
    An NxM lineage profile.
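A small self-contained sketch of the conversion on a toy allele table; the column names follow the convention this module assumes (cellBC, intBC, UMI, and r1/r2 cut sites), and the indel strings are made up:

import pandas as pd

# Toy allele table; the indel labels ("ins1", "del1", ...) are invented for illustration.
allele_table = pd.DataFrame(
    {
        "cellBC": ["cell1", "cell1", "cell2"],
        "intBC": ["A", "B", "A"],
        "UMI": [5, 3, 7],
        "r1": ["ins1", "None", "ins2"],
        "r2": ["None", "del1", "None"],
    }
)

# One row per cellBC, one column per intBC/cut-site pair (e.g. "A_r1").
profile = convert_alleletable_to_lineage_profile(allele_table, cut_sites=["r1", "r2"])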
https://github.com/yoseflab/cassiopeia/blob/6a4479e260a5fbefc663e0cecb7dfd51a4a01376/cassiopeia/preprocess/utilities.py#L480-L555
import functools import itertools import os import time from typing import Callable, Dict, List, Optional, Tuple import warnings from collections import defaultdict, OrderedDict import matplotlib import matplotlib.pyplot as plt import ngs_tools as ngs import numpy as np import pandas as pd import pylab import pysam import re from tqdm.auto import tqdm from cassiopeia.mixins import is_ambiguous_state, logger, PreprocessWarning def log_molecule_table(wrapped: Callable): @functools.wraps(wrapped) def wrapper(*args, **kwargs): df = wrapped(*args, **kwargs) umi_count = df["UMI"].dtype != object logger.debug( f"Resulting {'alleletable' if umi_count else 'molecule_table'} statistics:" ) logger.debug(f"# Reads: {df['readCount'].sum()}") logger.debug(f"# UMIs: {df['UMI'].sum() if umi_count else df.shape[0]}") logger.debug(f"# Cell BCs: {df['cellBC'].nunique()}") return df return wrapper def log_runtime(wrapped: Callable): @functools.wraps(wrapped) def wrapper(*args, **kwargs): t0 = time.time() logger.info("Starting...") try: return wrapped(*args, **kwargs) finally: logger.info(f"Finished in {time.time() - t0} s.") return wrapper def log_kwargs(wrapped: Callable): @functools.wraps(wrapped) def wrapper(*args, **kwargs): logger.debug(f"Keyword arguments: {kwargs}") return wrapped(*args, **kwargs) return wrapper @log_molecule_table def filter_cells( molecule_table: pd.DataFrame, min_umi_per_cell: int = 10, min_avg_reads_per_umi: float = 2.0, ) -> pd.DataFrame: umi_count = molecule_table["UMI"].dtype != object cell_groups = molecule_table.groupby("cellBC") umis_per_cell = ( cell_groups["UMI"].sum() if umi_count else cell_groups.size() ) umis_per_cell_mask = umis_per_cell >= min_umi_per_cell avg_reads_per_umi = cell_groups["readCount"].sum() / umis_per_cell avg_read_per_umi_mask = avg_reads_per_umi >= min_avg_reads_per_umi umis_per_cell_passing = set(umis_per_cell_mask.index[umis_per_cell_mask]) avg_read_per_umi_passing = set( avg_read_per_umi_mask.index[avg_read_per_umi_mask] ) passing_cells = umis_per_cell_passing & avg_read_per_umi_passing passing_mask = molecule_table["cellBC"].isin(passing_cells) n_cells = molecule_table["cellBC"].nunique() logger.info( f"Filtered out {n_cells - len(passing_cells)} cells with too few UMIs " "or too few average number of reads per UMI." 
) molecule_table_filt = molecule_table[~passing_mask] n_umi_filt = ( molecule_table_filt["UMI"].sum() if umi_count else molecule_table_filt.shape[0] ) logger.info(f"Filtered out {n_umi_filt} UMIs as a result.") return molecule_table[passing_mask].copy() @log_molecule_table def filter_umis( molecule_table: pd.DataFrame, min_reads_per_umi: int = 100 ) -> pd.DataFrame: return molecule_table[molecule_table["readCount"] >= min_reads_per_umi] @log_molecule_table def error_correct_intbc( molecule_table: pd.DataFrame, prop: float = 0.5, umi_count_thresh: int = 10, dist_thresh: int = 1, ) -> pd.DataFrame: if prop > 0.5: warnings.warn( "No intBC correction was done because `prop` is greater than 0.5.", PreprocessWarning, ) return molecule_table cellBC_intBC_allele_groups = molecule_table.groupby( ["cellBC", "intBC", "allele"], sort=False ) cellBC_intBC_allele_indices = cellBC_intBC_allele_groups.groups molecule_table_agg = ( cellBC_intBC_allele_groups.agg({"UMI": "count", "readCount": "sum"}) .sort_values("UMI", ascending=False) .reset_index() ) for (cellBC, allele), intBC_table in tqdm( molecule_table_agg.groupby(["cellBC", "allele"], sort=False), desc="Error Correcting intBCs", ): for i1 in range(intBC_table.shape[0]): row1 = intBC_table.iloc[i1] intBC1 = row1["intBC"] UMI1 = row1["UMI"] for i2 in range(i1 + 1, intBC_table.shape[0]): row2 = intBC_table.iloc[i2] intBC2 = row2["intBC"] UMI2 = row2["UMI"] total_count = UMI1 + UMI2 proportion = UMI2 / total_count distance = ngs.sequence.levenshtein_distance(intBC1, intBC2) if ( distance <= dist_thresh and proportion < prop and UMI2 <= umi_count_thresh ): key_to_correct = (cellBC, intBC2, allele) molecule_table.loc[ cellBC_intBC_allele_indices[key_to_correct], "intBC" ] = intBC1 logger.info( f"In cellBC {cellBC}, intBC {intBC2} corrected to " f"{intBC1}, correcting {UMI2} UMIs to {UMI1} UMIs." 
) return molecule_table def record_stats( molecule_table: pd.DataFrame, ) -> Tuple[np.array, np.array, np.array]: umis_per_intBC = ( molecule_table.groupby(["cellBC", "intBC"], sort=False).size().values ) umis_per_cellBC = molecule_table.groupby("cellBC", sort=False).size().values return ( molecule_table["readCount"].values, umis_per_intBC, umis_per_cellBC, ) def convert_bam_to_df(data_fp: str) -> pd.DataFrame: als = [] with pysam.AlignmentFile( data_fp, ignore_truncation=True, check_sq=False ) as bam_fh: for al in bam_fh: cellBC, UMI, readCount, grpFlag = al.query_name.split("_") seq = al.query_sequence qual = al.query_qualities encode_qual = pysam.array_to_qualitystring(qual) als.append( [ cellBC, UMI, int(readCount), grpFlag, seq, encode_qual, al.query_name, ] ) return pd.DataFrame( als, columns=[ "cellBC", "UMI", "readCount", "grpFlag", "seq", "qual", "readName", ], ) def convert_alleletable_to_character_matrix( alleletable: pd.DataFrame, ignore_intbcs: List[str] = [], allele_rep_thresh: float = 1.0, missing_data_allele: Optional[str] = None, missing_data_state: int = -1, mutation_priors: Optional[pd.DataFrame] = None, cut_sites: Optional[List[str]] = None, collapse_duplicates: bool = True, ) -> Tuple[ pd.DataFrame, Dict[int, Dict[int, float]], Dict[int, Dict[int, str]] ]: if cut_sites is None: cut_sites = get_default_cut_site_columns(alleletable) filtered_samples = defaultdict(OrderedDict) for sample in alleletable.index: cell = alleletable.loc[sample, "cellBC"] intBC = alleletable.loc[sample, "intBC"] if intBC in ignore_intbcs: continue for i, c in enumerate(cut_sites): if intBC not in ignore_intbcs: filtered_samples[cell].setdefault(f"{intBC}{c}", []).append( alleletable.loc[sample, c] ) character_strings = defaultdict(list) allele_counter = defaultdict(OrderedDict) _intbc_uniq = set() allele_dist = defaultdict(list) for s in filtered_samples: for key in filtered_samples[s]: _intbc_uniq.add(key) allele_dist[key].extend(list(set(filtered_samples[s][key]))) intbc_uniq = [] dropped = [] for key in allele_dist.keys(): props = np.unique(allele_dist[key], return_counts=True)[1] props = props / len(allele_dist[key]) if np.any(props > allele_rep_thresh): dropped.append(key) else: intbc_uniq.append(key) print( "Dropping the following intBCs due to lack of diversity with threshold " + str(allele_rep_thresh) + ": " + str(dropped) ) prior_probs = defaultdict(dict) indel_to_charstate = defaultdict(dict) for i in tqdm(range(len(list(intbc_uniq))), desc="Processing characters"): c = list(intbc_uniq)[i] indel_to_charstate[i] = {} for sample in filtered_samples.keys(): if c in filtered_samples[sample]: states = filtered_samples[sample][c] transformed_states = [] for state in states: if type(state) != str and np.isnan(state): transformed_states.append(missing_data_state) continue if state == "NONE" or "None" in state: transformed_states.append(0) elif ( missing_data_allele is not None and state == missing_data_allele ): transformed_states.append(missing_data_state) else: if state in allele_counter[c]: transformed_states.append(allele_counter[c][state]) else: allele_counter[c][state] = ( len(allele_counter[c]) + 1 ) transformed_states.append(allele_counter[c][state]) indel_to_charstate[i][ len(allele_counter[c]) ] = state if mutation_priors is not None: prob = np.mean( mutation_priors.loc[state, "freq"] ) prior_probs[i][len(allele_counter[c])] = float( prob ) if collapse_duplicates: transformed_states = sorted(set(transformed_states)) transformed_states = tuple(transformed_states) if 
len(transformed_states) == 1: transformed_states = transformed_states[0] character_strings[sample].append(transformed_states) else: character_strings[sample].append(missing_data_state) character_matrix = pd.DataFrame.from_dict( character_strings, orient="index", columns=[f"r{i}" for i in range(1, len(intbc_uniq) + 1)], ) return character_matrix, prior_probs, indel_to_charstate
MIT License
a3data/hermione
hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/model/wrapper.py
Wrapper.get_metrics
python
def get_metrics(self): return self.artifacts["metrics"]
Return metrics

Parameters
----------
self : object
    Wrapper

Returns
-------
dict
https://github.com/a3data/hermione/blob/4a833e96664fc91c65bdd28b2637c291f4f5a4d6/hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/model/wrapper.py#L150-L162
from joblib import dump, load from datetime import date import mlflow.pyfunc from mlflow import pyfunc from interpret.ext.blackbox import TabularExplainer, MimicExplainer from interpret.ext.glassbox import * import pandas as pd from util import load_yaml, load_json class Wrapper(mlflow.pyfunc.PythonModel): def __init__(self, model=None, preprocessing=None, metrics=None, columns=None): self.artifacts = dict() self.artifacts["model"] = model self.artifacts["preprocessing"] = preprocessing self.artifacts["metrics"] = metrics self.artifacts["columns"] = columns self.artifacts["creation_date"] = date.today() def predict(self, model_input): df_processed = model_input.copy() model = self.artifacts["model"] columns = self.artifacts["columns"] return model.predict(df_processed[columns]) def predict_proba(self, model_input, binary=False): df_processed = model_input.copy() model = self.artifacts["model"] columns = self.artifacts["columns"] if binary: return model.predict_proba(df_processed[columns])[:, 1] else: return model.predict_proba(df_processed[columns]) def load(self, path): return load(path) def save_model(self, path): dump(self, path) @staticmethod def load_model(path): model = pyfunc.load_model(path) return model def save(self, path): path_artifacts = path + "_artifacts.pkl" dump(self.artifacts, path_artifacts) content = load_json("config/arquivos.json") conda_env = load_yaml(content["path_yaml"]) mlflow.pyfunc.save_model( path=path, python_model=self, artifacts={"model": path_artifacts}, conda_env=conda_env, )
Apache License 2.0
ansible-community/ansible-lint
src/ansiblelint/prerun.py
_write_module_stub
python
def _write_module_stub(
    filename: str,
    name: str,
    namespace: Optional[str] = None,
    collection: Optional[str] = None,
) -> None:
    body = ANSIBLE_MOCKED_MODULE.format(
        name=name, collection=collection, namespace=namespace
    )
    with open(filename, "w") as f:
        f.write(body)
Write module stub to disk.
https://github.com/ansible-community/ansible-lint/blob/306573167ad21c37a5aa72017bda57e1bad28c80/src/ansiblelint/prerun.py#L354-L365
import json import logging import os import pathlib import re import subprocess import sys from functools import lru_cache from typing import Any, Dict, List, Optional, Tuple, Type, Union import packaging import tenacity from packaging import version from ansiblelint.config import ( ansible_collections_path, collection_list, options, parse_ansible_version, ) from ansiblelint.constants import ( ANSIBLE_DEFAULT_ROLES_PATH, ANSIBLE_MIN_VERSION, ANSIBLE_MISSING_RC, ANSIBLE_MOCKED_MODULE, INVALID_CONFIG_RC, INVALID_PREREQUISITES_RC, ) from ansiblelint.loaders import yaml_from_file _logger = logging.getLogger(__name__) def check_ansible_presence(exit_on_error: bool = False) -> Tuple[str, str]: @lru_cache() def _get_ver_err() -> Tuple[str, str]: err = "" failed = False ver = "" result = subprocess.run( args=["ansible", "--version"], stdout=subprocess.PIPE, universal_newlines=True, check=False, ) if result.returncode != 0: return ( ver, "FATAL: Unable to retrieve ansible cli version: %s" % result.stdout, ) ver, error = parse_ansible_version(result.stdout) if error is not None: return "", error try: from ansible.release import __version__ as ansible_module_version if version.parse(ansible_module_version) < version.parse( ANSIBLE_MIN_VERSION ): failed = True except (ImportError, ModuleNotFoundError) as e: failed = True ansible_module_version = "none" err += f"{e}\n" if failed: err += ( "FATAL: ansible-lint requires a version of Ansible package" " >= %s, but %s was found. " "Please install a compatible version using the same python interpreter. See " "https://docs.ansible.com/ansible/latest/installation_guide" "/intro_installation.html#installing-ansible-with-pip" % (ANSIBLE_MIN_VERSION, ansible_module_version) ) elif ver != ansible_module_version: err = ( f"FATAL: Ansible CLI ({ver}) and python module" f" ({ansible_module_version}) versions do not match. This " "indicates a broken execution environment." 
) return ver, err ver, err = _get_ver_err() if exit_on_error and err: _logger.error(err) sys.exit(ANSIBLE_MISSING_RC) return ver, err def install_collection(collection: str, destination: Optional[str] = None) -> None: cmd = [ "ansible-galaxy", "collection", "install", "--force", "-v", ] if destination: cmd.extend(["-p", destination]) cmd.append(f"{collection}") _logger.info("Running %s", " ".join(cmd)) run = subprocess.run( cmd, universal_newlines=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) if run.returncode != 0: _logger.error("Command returned %s code:\n%s", run.returncode, run.stdout) sys.exit(INVALID_PREREQUISITES_RC) @tenacity.retry( reraise=True, wait=tenacity.wait_fixed(30), stop=tenacity.stop_after_attempt(3), before_sleep=tenacity.after_log(_logger, logging.WARNING), ) def install_requirements(requirement: str) -> None: if not os.path.exists(requirement): return cmd = [ "ansible-galaxy", "role", "install", "--force", "--roles-path", f"{options.cache_dir}/roles", "-vr", f"{requirement}", ] _logger.info("Running %s", " ".join(cmd)) run = subprocess.run( cmd, universal_newlines=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) if run.returncode != 0: _logger.error(run.stdout) raise RuntimeError(run.returncode) if "collections" in yaml_from_file(requirement): cmd = [ "ansible-galaxy", "collection", "install", "--force", "-p", f"{options.cache_dir}/collections", "-vr", f"{requirement}", ] _logger.info("Running %s", " ".join(cmd)) run = subprocess.run( cmd, universal_newlines=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) if run.returncode != 0: _logger.error(run.stdout) raise RuntimeError(run.returncode) def prepare_environment(required_collections: Optional[Dict[str, str]] = None) -> None: if not options.configured: from ansiblelint.__main__ import initialize_options initialize_options() if not options.offline: install_requirements("requirements.yml") for req in pathlib.Path(".").glob("molecule/*/requirements.yml"): install_requirements(str(req)) if required_collections: for name, min_version in required_collections.items(): install_collection( f"{name}:>={min_version}", destination=f"{options.cache_dir}/collections" if options.cache_dir else None, ) _install_galaxy_role() _perform_mockings() _prepare_ansible_paths() def _get_galaxy_role_ns(galaxy_infos: Dict[str, Any]) -> str: role_namespace = galaxy_infos.get('namespace', "") if len(role_namespace) == 0: role_namespace = galaxy_infos.get('author', "") if re.match(r"^\w+ \w+", role_namespace): role_namespace = "" else: role_namespace = f"{role_namespace}." 
if not isinstance(role_namespace, str): raise RuntimeError("Role namespace must be string, not %s" % role_namespace) return role_namespace def _get_galaxy_role_name(galaxy_infos: Dict[str, Any]) -> str: return galaxy_infos.get('role_name', "") def _get_role_fqrn(galaxy_infos: Dict[str, Any]) -> str: role_namespace = _get_galaxy_role_ns(galaxy_infos) role_name = _get_galaxy_role_name(galaxy_infos) if len(role_name) == 0: role_name = pathlib.Path(".").absolute().name role_name = re.sub(r'(ansible-|ansible-role-)', '', role_name) return f"{role_namespace}{role_name}" def _install_galaxy_role() -> None: if not os.path.exists("meta/main.yml"): return yaml = yaml_from_file("meta/main.yml") if 'galaxy_info' not in yaml: return fqrn = _get_role_fqrn(yaml['galaxy_info']) if 'role-name' not in options.skip_list: if not re.match(r"[a-z0-9][a-z0-9_]+\.[a-z][a-z0-9_]+$", fqrn): msg = ( """\ Computed fully qualified role name of %s does not follow current galaxy requirements. Please edit meta/main.yml and assure we can correctly determine full role name: galaxy_info: role_name: my_name # if absent directory name hosting role is used instead namespace: my_galaxy_namespace # if absent, author is used instead Namespace: https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespace-limitations Role: https://galaxy.ansible.com/docs/contributing/creating_role.html#role-names As an alternative, you can add 'role-name' to either skip_list or warn_list. """ % fqrn ) if 'role-name' in options.warn_list: _logger.warning(msg) else: _logger.error(msg) sys.exit(INVALID_PREREQUISITES_RC) else: if 'role_name' in yaml['galaxy_info']: role_namespace = _get_galaxy_role_ns(yaml['galaxy_info']) role_name = _get_galaxy_role_name(yaml['galaxy_info']) fqrn = f"{role_namespace}{role_name}" else: fqrn = pathlib.Path(".").absolute().name p = pathlib.Path(f"{options.cache_dir}/roles") p.mkdir(parents=True, exist_ok=True) link_path = p / fqrn target = pathlib.Path(options.project_dir).absolute() if not link_path.exists() or os.readlink(link_path) != str(target): if link_path.exists(): link_path.unlink() link_path.symlink_to(target, target_is_directory=True) _logger.info( "Using %s symlink to current repository in order to enable Ansible to find the role using its expected full name.", link_path, ) def _prepare_ansible_paths() -> None: library_paths: List[str] = [] roles_path: List[str] = [] for path_list, path in ( (library_paths, "plugins/modules"), (library_paths, f"{options.cache_dir}/modules"), (collection_list, f"{options.cache_dir}/collections"), (roles_path, "roles"), (roles_path, f"{options.cache_dir}/roles"), ): if path not in path_list and os.path.exists(path): path_list.append(path) _update_env('ANSIBLE_LIBRARY', library_paths) _update_env(ansible_collections_path(), collection_list) _update_env('ANSIBLE_ROLES_PATH', roles_path, default=ANSIBLE_DEFAULT_ROLES_PATH) def _make_module_stub(module_name: str) -> None: if re.match(r"^(\w+|\w+\.\w+\.[\.\w]+)$", module_name): parts = module_name.split(".") if len(parts) < 3: path = f"{options.cache_dir}/modules" module_file = f"{options.cache_dir}/modules/{module_name}.py" namespace = None collection = None else: namespace = parts[0] collection = parts[1] path = f"{ options.cache_dir }/collections/ansible_collections/{ namespace }/{ collection }/plugins/modules/{ '/'.join(parts[2:-1]) }" module_file = f"{path}/{parts[-1]}.py" os.makedirs(path, exist_ok=True) _write_module_stub( filename=module_file, name=module_file, namespace=namespace, collection=collection, ) 
else: _logger.error("Config error: %s is not a valid module name.", module_name) sys.exit(INVALID_CONFIG_RC)
MIT License
dmontagu/fastapi-utils
fastapi_utils/api_settings.py
get_api_settings
python
def get_api_settings() -> APISettings: return APISettings()
This function returns a cached instance of the APISettings object. Caching is used to prevent re-reading the environment every time the API settings are used in an endpoint. If you want to change an environment variable and reset the cache (e.g., during testing), this can be done using the `lru_cache` instance method `get_api_settings.cache_clear()`.
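A sketch of the intended use in an application factory, together with the cache reset mentioned above; get_app is a hypothetical name, not part of the library:

from fastapi import FastAPI

def get_app() -> FastAPI:
    # Hypothetical factory: read settings once; later calls hit the lru_cache.
    settings = get_api_settings()
    return FastAPI(**settings.fastapi_kwargs)

# In tests, clear the cache after monkeypatching environment variables:
# get_api_settings.cache_clear()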
https://github.com/dmontagu/fastapi-utils/blob/af95ff4a8195caaa9edaa3dbd5b6eeb09691d9c7/fastapi_utils/api_settings.py#L60-L69
from functools import lru_cache from typing import Any, Dict from pydantic import BaseSettings class APISettings(BaseSettings): debug: bool = False docs_url: str = "/docs" openapi_prefix: str = "" openapi_url: str = "/openapi.json" redoc_url: str = "/redoc" title: str = "FastAPI" version: str = "0.1.0" disable_docs: bool = False @property def fastapi_kwargs(self) -> Dict[str, Any]: fastapi_kwargs: Dict[str, Any] = { "debug": self.debug, "docs_url": self.docs_url, "openapi_prefix": self.openapi_prefix, "openapi_url": self.openapi_url, "redoc_url": self.redoc_url, "title": self.title, "version": self.version, } if self.disable_docs: fastapi_kwargs.update({"docs_url": None, "openapi_url": None, "redoc_url": None}) return fastapi_kwargs class Config: env_prefix = "api_" validate_assignment = True @lru_cache()
MIT License
therve/twotp
twotp/packer.py
Packer.pack_float
python
def pack_float(self, term):
    term = "%.20e" % (term,)
    packetData = self.packChar(self.MAGIC_FLOAT)
    packetData += term
    nullPadStr = "\0" * (31 - len(term))
    return packetData + nullPadStr
Pack a float.
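A small sketch of the resulting wire format (the module targets Python 2, so the packed value is a byte string); the tag byte comes from MAGIC_FLOAT on the Packer/ConstantHolder class, and the numeric value is arbitrary:

packer = Packer()
packed = packer.pack_float(1.5)  # 1.5 is an arbitrary example value

# One tag byte followed by a 31-byte, null-padded "%.20e" rendering of the number.
assert len(packed) == 32
assert packed[1:].rstrip("\0") == "%.20e" % (1.5,)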
https://github.com/therve/twotp/blob/67d0c9475c5c211e8f9d6280f8c3e04fff944a73/twotp/packer.py#L110-L118
import struct import zlib from twotp.term import ConstantHolder, Atom class UnhandledClass(KeyError): class Packer(ConstantHolder): MAX_INT = pow(2, 32) MAX_SHORT = pow(2, 16) MAX_CHAR = pow(2, 8) def packChar(self, char): return chr(char) def packShort(self, short): if short >= self.MAX_SHORT: raise ValueError("Number too big to fit in short: %s" % (short,)) return struct.pack("!H", short) def packInt(self, integer): if integer >= self.MAX_INT: raise ValueError("Number too big to fit in int: %s" % (integer,)) return struct.pack("!i", integer) def _pack_id(self, term, maxSignificantBits=18): return self.packInt(term & ((1 << maxSignificantBits) - 1)) def _pack_creation(self, term): return self.packChar(term & ((1 << 2) - 1)) def pack_int(self, term): if 0 <= term < self.MAX_CHAR: return (self.packChar(self.MAGIC_SMALL_INTEGER) + self.packChar(term)) elif -2 ** 31 <= term < 2 ** 31: return self.packChar(self.MAGIC_INTEGER) + self.packInt(term) else: sign = int(term < 0) term = abs(term) a = 1 n = 0 while a < term: n += 1 a = 256 ** n if n < 256: data = self.packChar(self.MAGIC_SMALL_BIG) + self.packChar(n) else: data = self.packChar(self.MAGIC_LARGE_BIG) + self.packInt(n) data += self.packChar(sign) content = [] for i in xrange(n - 1, -1, -1): c = term // 256 ** i content.append(self.packChar(c)) term = term - (256 ** i) * c content.reverse() return data + "".join(content) pack_long = pack_int
MIT License
bigpon/qpnet
src/nets/qpnet.py
DilatedConv1d.forward
python
def forward(self, xC, xP):
    xC = self.convC(xC)
    xP = self.convP(xP)
    return xC + xP
Forward calculation

Args:
    xC (tensor): float tensor variable with the shape (B x C x T)
    xP (tensor): float tensor variable with the shape (B x C x T)

Returns:
    (tensor): float tensor variable with the shape (B x C x T)
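A quick shape check for the layer, assuming PyTorch tensors; the batch, channel, and time sizes are arbitrary:

import torch

# Arbitrary sizes: batch B=2, input channels C=16, output channels 32, time T=100.
layer = DilatedConv1d(in_channels=16, out_channels=32)
xC = torch.randn(2, 16, 100)  # B x C x T
xP = torch.randn(2, 16, 100)  # B x C x T
y = layer(xC, xP)
assert y.shape == (2, 32, 100)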
https://github.com/bigpon/qpnet/blob/657fcb01b23e9e3371b5a4b2ebeec5757ad33e2d/src/nets/qpnet.py#L98-L108
from __future__ import division import logging import sys import time import yaml import torch import numpy as np import torch.nn.functional as F from torch import nn from numpy.matlib import repmat def encode_mu_law(x, mu=256): mu = mu - 1 fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu) return np.floor((fx + 1) / 2 * mu + 0.5).astype(np.int64) def decode_mu_law(y, mu=256): mu = mu - 1 fx = (y - 0.5) / mu * 2 - 1 x = np.sign(fx) / mu * ((1 + mu) ** np.abs(fx) - 1) return x def initialize(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0.0) if isinstance(m, nn.ConvTranspose2d): nn.init.constant_(m.weight, 1.0) nn.init.constant_(m.bias, 0.0) class OneHot(nn.Module): def __init__(self, depth): super(OneHot, self).__init__() self.depth = depth def forward(self, x): x = x % self.depth x = torch.unsqueeze(x, 2) x_onehot = x.new_zeros(x.size(0), x.size(1), self.depth).float() return x_onehot.scatter_(2, x, 1) def FDilatedConv1d(xC, xP, nnModule): convC = nnModule.convC convP = nnModule.convP output = F.conv1d(xC, convC.weight, convC.bias) + F.conv1d(xP, convP.weight, convP.bias) return output class DilatedConv1d(nn.Module): def __init__(self, in_channels, out_channels, bias=True): super(DilatedConv1d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.convC = nn.Conv1d(in_channels, out_channels, 1, bias=bias) self.convP = nn.Conv1d(in_channels, out_channels, 1, bias=bias)
Apache License 2.0
paddlepaddle/paddle
python/paddle/fluid/layers/sequence_lod.py
sequence_slice
python
def sequence_slice(input, offset, length, name=None):
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper("sequence_slice", **locals())
    check_variable_and_dtype(input, 'input',
                             ['float32', 'float64', 'int32', 'int64'],
                             'sequence_slice')
    check_variable_and_dtype(offset, 'offset', ['int32', 'int64'],
                             'sequence_slice')
    check_variable_and_dtype(length, 'length', ['int32', 'int64'],
                             'sequence_slice')
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)

    offset.stop_gradient = True
    length.stop_gradient = True

    helper.append_op(
        type="sequence_slice",
        inputs={"X": input,
                "Offset": offset,
                "Length": length},
        outputs={"Out": out})

    return out
:api_attr: Static Graph

**Sequence Slice Layer**

The layer crops a subsequence from given sequence with given start offset and subsequence length.
It only supports sequence data (LoDTensor with lod_level equal to 1).

.. code-block:: text

    - Case:

    Given the input Variable **input**:

        input.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]],
        input.lod = [[3, 2]],
        input.dims = (5, 2),

    with offset.data = [[0], [1]] and length.data = [[2], [1]],

    the output Variable will be

        out.data = [[a1, a2], [b1, b2], [e1, e2]],
        out.lod = [[2, 1]],
        out.dims = (3, 2).

Note:
    The first dimension size of **input**, **offset** and **length**
    should be equal. The **offset** should start from 0.

Args:
    input(Variable): LoDTensor, The input Variable which consists of the complete sequences.
        The data type can be float32, float64, int32 or int64.
    offset(Variable): LoDTensor, The offset to slice each sequence. The data type is int32 or int64.
    length(Variable): LoDTensor, The length of each subsequence. The data type is int32 or int64.
    name(str|None): The default value is None. Normally there is no need for user to set this property.
        For more information, please refer to :ref:`api_guide_Name`

Returns:
    Variable: The output subsequences.

Examples:
    .. code-block:: python

        import paddle
        paddle.enable_static()
        import numpy as np

        seqs = paddle.static.data(name='x', shape=[10, 5],
                                  dtype='float32', lod_level=1)
        offset = paddle.assign(np.array([[0, 1]]).astype("int32"))
        length = paddle.assign(np.array([[2, 1]]).astype("int32"))
        subseqs = paddle.static.nn.sequence_slice(input=seqs, offset=offset,
                                                  length=length)
https://github.com/paddlepaddle/paddle/blob/056b87414880e0520bb4560fc40d5b62db9c5175/python/paddle/fluid/layers/sequence_lod.py#L560-L647
from __future__ import print_function from .layer_function_generator import templatedoc from ..framework import Variable, in_dygraph_mode from ..layer_helper import LayerHelper from ..data_feeder import check_variable_and_dtype, check_type, check_dtype from ..core import VarDesc __all__ = [ 'sequence_conv', 'sequence_softmax', 'sequence_pool', 'sequence_concat', 'sequence_first_step', 'sequence_last_step', 'sequence_slice', 'sequence_expand', 'sequence_expand_as', 'sequence_pad', 'sequence_unpad', 'sequence_reshape', 'sequence_scatter', 'sequence_enumerate', 'sequence_mask', 'sequence_reverse', ] @templatedoc() def sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=True, padding_start=None, bias_attr=None, param_attr=None, act=None, name=None): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'sequence_conv') helper = LayerHelper('sequence_conv', **locals()) dtype = helper.input_dtype() filter_shape = [filter_size * input.shape[1], num_filters] filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype) pre_bias = helper.create_variable_for_type_inference(dtype) if padding_start is None: padding_start = -int(filter_size // 2) helper.append_op( type='sequence_conv', inputs={ 'X': [input], 'Filter': [filter_param], }, outputs={"Out": pre_bias}, attrs={ 'contextStride': filter_stride, 'contextStart': padding_start, 'contextLength': filter_size, }) pre_act = helper.append_bias_op(pre_bias) return helper.append_activation(pre_act) def sequence_softmax(input, use_cudnn=False, name=None): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_softmax', **locals()) check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'sequence_softmax') dtype = helper.input_dtype() softmax_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sequence_softmax", inputs={"X": input}, outputs={"Out": softmax_out}, attrs={"use_cudnn": use_cudnn}) return softmax_out def sequence_pool(input, pool_type, is_test=False, pad_value=0.0): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'sequence_pool') helper = LayerHelper('sequence_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) max_index = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sequence_pool", inputs={"X": input}, outputs={"Out": pool_out, "MaxIndex": max_index}, attrs={ "pooltype": pool_type.upper(), "is_test": is_test, "pad_value": pad_value }) if pool_type == 'max': max_index.stop_gradient = True return pool_out @templatedoc() def sequence_concat(input, name=None): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_concat', **locals()) check_type(input, 'input', list, 'fluid.layers.sequence_concat') for i, input_x in enumerate(input): check_variable_and_dtype(input_x, 'input[' + str(i) + ']', ['int64', 'float32', 'float64'], 'fluid.layers.sequence_concat') out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) helper.append_op( type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]}) return out def sequence_first_step(input): check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'sequence_first_step') 
return sequence_pool(input=input, pool_type="first") def sequence_last_step(input): check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'sequence_last_step') return sequence_pool(input=input, pool_type="last")
Apache License 2.0
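The "Case" in the sequence_slice docstring above describes per-sequence slicing of a LoDTensor: each packed sequence is cut at its own offset for its own length. The plain-numpy sketch below reproduces only that bookkeeping by hand; it is not the Paddle op, and the concrete numbers stand in for the symbolic a1..e2 values in the docstring.

import numpy as np

# lod [[3, 2]] means two sequences of lengths 3 and 2 packed into one array.
data = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])  # stands for a..e
lod = [3, 2]              # sequence lengths
offset = [0, 1]           # per-sequence start offsets
length = [2, 1]           # per-sequence slice lengths

out, out_lod, start = [], [], 0
for seq_len, off, ln in zip(lod, offset, length):
    out.append(data[start + off: start + off + ln])
    out_lod.append(ln)
    start += seq_len

print(np.concatenate(out))  # rows a, b, e -> [[1 1] [2 2] [5 5]]
print(out_lod)              # [2, 1], matching out.lod in the docstring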
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/v5_1/work_item_tracking/work_item_tracking_client.py
WorkItemTrackingClient.delete_comment_reaction
python
def delete_comment_reaction(self, project, work_item_id, comment_id, reaction_type):
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if work_item_id is not None:
        route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int')
    if comment_id is not None:
        route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int')
    if reaction_type is not None:
        route_values['reactionType'] = self._serialize.url('reaction_type', reaction_type, 'CommentReactionType')
    response = self._send(http_method='DELETE',
                          location_id='f6cb3f27-1028-4851-af96-887e570dc21f',
                          version='5.1-preview.1',
                          route_values=route_values)
    return self._deserialize('CommentReaction', response)
DeleteCommentReaction.
[Preview API] Deletes an existing reaction on a comment.
:param str project: Project ID or project name
:param int work_item_id: WorkItem ID
:param int comment_id: Comment ID
:param CommentReactionType reaction_type: Type of the reaction
:rtype: :class:`<CommentReaction> <azure.devops.v5_1.work_item_tracking.models.CommentReaction>`
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/v5_1/work_item_tracking/work_item_tracking_client.py#L521-L543
 from msrest import Serializer, Deserializer from ...client import Client from . import models class WorkItemTrackingClient(Client): def __init__(self, base_url=None, creds=None): super(WorkItemTrackingClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '5264459e-e5e0-4bd8-b118-0985e68a4ec5' def get_recent_activity_data(self): response = self._send(http_method='GET', location_id='1bc988f4-c15f-4072-ad35-497c87e3a909', version='5.1-preview.2') return self._deserialize('[AccountRecentActivityWorkItemModel2]', self._unwrap_collection(response)) def get_work_artifact_link_types(self): response = self._send(http_method='GET', location_id='1a31de40-e318-41cd-a6c6-881077df52e3', version='5.1-preview.1') return self._deserialize('[WorkArtifactLink]', self._unwrap_collection(response)) def query_work_items_for_artifact_uris(self, artifact_uri_query, project=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(artifact_uri_query, 'ArtifactUriQuery') response = self._send(http_method='POST', location_id='a9a9aa7a-8c09-44d3-ad1b-46e855c1e3d3', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('ArtifactUriQueryResult', response) def create_attachment(self, upload_stream, project=None, file_name=None, upload_type=None, area_path=None, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') if upload_type is not None: query_parameters['uploadType'] = self._serialize.query('upload_type', upload_type, 'str') if area_path is not None: query_parameters['areaPath'] = self._serialize.query('area_path', area_path, 'str') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='POST', location_id='e07b5fa4-1499-494d-a496-64b860fd64ff', version='5.1', route_values=route_values, query_parameters=query_parameters, content=content, media_type='application/octet-stream') return self._deserialize('AttachmentReference', response) def get_attachment_content(self, id, project=None, file_name=None, download=None, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if id is not None: route_values['id'] = self._serialize.url('id', id, 'str') query_parameters = {} if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') if download is not None: query_parameters['download'] = self._serialize.query('download', download, 'bool') response = self._send(http_method='GET', location_id='e07b5fa4-1499-494d-a496-64b860fd64ff', version='5.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_attachment_zip(self, id, project=None, file_name=None, download=None, **kwargs): route_values = {} if project is not None: route_values['project'] = 
self._serialize.url('project', project, 'str') if id is not None: route_values['id'] = self._serialize.url('id', id, 'str') query_parameters = {} if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') if download is not None: query_parameters['download'] = self._serialize.query('download', download, 'bool') response = self._send(http_method='GET', location_id='e07b5fa4-1499-494d-a496-64b860fd64ff', version='5.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_classification_nodes(self, project, ids, depth=None, error_policy=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if ids is not None: ids = ",".join(map(str, ids)) query_parameters['ids'] = self._serialize.query('ids', ids, 'str') if depth is not None: query_parameters['$depth'] = self._serialize.query('depth', depth, 'int') if error_policy is not None: query_parameters['errorPolicy'] = self._serialize.query('error_policy', error_policy, 'str') response = self._send(http_method='GET', location_id='a70579d1-f53a-48ee-a5be-7be8659023b9', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response)) def get_root_nodes(self, project, depth=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if depth is not None: query_parameters['$depth'] = self._serialize.query('depth', depth, 'int') response = self._send(http_method='GET', location_id='a70579d1-f53a-48ee-a5be-7be8659023b9', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response)) def create_or_update_classification_node(self, posted_node, project, structure_group, path=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if structure_group is not None: route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup') if path is not None: route_values['path'] = self._serialize.url('path', path, 'str') content = self._serialize.body(posted_node, 'WorkItemClassificationNode') response = self._send(http_method='POST', location_id='5a172953-1b41-49d3-840a-33f79c3ce89f', version='5.1', route_values=route_values, content=content) return self._deserialize('WorkItemClassificationNode', response) def delete_classification_node(self, project, structure_group, path=None, reclassify_id=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if structure_group is not None: route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup') if path is not None: route_values['path'] = self._serialize.url('path', path, 'str') query_parameters = {} if reclassify_id is not None: query_parameters['$reclassifyId'] = self._serialize.query('reclassify_id', reclassify_id, 'int') self._send(http_method='DELETE', location_id='5a172953-1b41-49d3-840a-33f79c3ce89f', version='5.1', route_values=route_values, query_parameters=query_parameters) 
def get_classification_node(self, project, structure_group, path=None, depth=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if structure_group is not None: route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup') if path is not None: route_values['path'] = self._serialize.url('path', path, 'str') query_parameters = {} if depth is not None: query_parameters['$depth'] = self._serialize.query('depth', depth, 'int') response = self._send(http_method='GET', location_id='5a172953-1b41-49d3-840a-33f79c3ce89f', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('WorkItemClassificationNode', response) def update_classification_node(self, posted_node, project, structure_group, path=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if structure_group is not None: route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup') if path is not None: route_values['path'] = self._serialize.url('path', path, 'str') content = self._serialize.body(posted_node, 'WorkItemClassificationNode') response = self._send(http_method='PATCH', location_id='5a172953-1b41-49d3-840a-33f79c3ce89f', version='5.1', route_values=route_values, content=content) return self._deserialize('WorkItemClassificationNode', response) def get_engaged_users(self, project, work_item_id, comment_id, reaction_type, top=None, skip=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') if comment_id is not None: route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int') if reaction_type is not None: route_values['reactionType'] = self._serialize.url('reaction_type', reaction_type, 'CommentReactionType') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='e33ca5e0-2349-4285-af3d-d72d86781c35', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[IdentityRef]', self._unwrap_collection(response)) def add_comment(self, request, project, work_item_id): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') content = self._serialize.body(request, 'CommentCreate') response = self._send(http_method='POST', location_id='608aac0a-32e1-4493-a863-b9cf4566d257', version='5.1-preview.3', route_values=route_values, content=content) return self._deserialize('Comment', response) def delete_comment(self, project, work_item_id, comment_id): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') if comment_id is not None: route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int') self._send(http_method='DELETE', 
location_id='608aac0a-32e1-4493-a863-b9cf4566d257', version='5.1-preview.3', route_values=route_values) def get_comment(self, project, work_item_id, comment_id, include_deleted=None, expand=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') if comment_id is not None: route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int') query_parameters = {} if include_deleted is not None: query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool') if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') response = self._send(http_method='GET', location_id='608aac0a-32e1-4493-a863-b9cf4566d257', version='5.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('Comment', response) def get_comments(self, project, work_item_id, top=None, continuation_token=None, include_deleted=None, expand=None, order=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if include_deleted is not None: query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool') if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') if order is not None: query_parameters['order'] = self._serialize.query('order', order, 'str') response = self._send(http_method='GET', location_id='608aac0a-32e1-4493-a863-b9cf4566d257', version='5.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('CommentList', response) def get_comments_batch(self, project, work_item_id, ids, include_deleted=None, expand=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') query_parameters = {} if ids is not None: ids = ",".join(map(str, ids)) query_parameters['ids'] = self._serialize.query('ids', ids, 'str') if include_deleted is not None: query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool') if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') response = self._send(http_method='GET', location_id='608aac0a-32e1-4493-a863-b9cf4566d257', version='5.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('CommentList', response) def update_comment(self, request, project, work_item_id, comment_id): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') if comment_id is not None: route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int') content = 
self._serialize.body(request, 'CommentUpdate') response = self._send(http_method='PATCH', location_id='608aac0a-32e1-4493-a863-b9cf4566d257', version='5.1-preview.3', route_values=route_values, content=content) return self._deserialize('Comment', response) def create_comment_reaction(self, project, work_item_id, comment_id, reaction_type): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if work_item_id is not None: route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int') if comment_id is not None: route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int') if reaction_type is not None: route_values['reactionType'] = self._serialize.url('reaction_type', reaction_type, 'CommentReactionType') response = self._send(http_method='PUT', location_id='f6cb3f27-1028-4851-af96-887e570dc21f', version='5.1-preview.1', route_values=route_values) return self._deserialize('CommentReaction', response)
MIT License
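A hedged usage sketch for delete_comment_reaction above. The constructor signature and the method parameters come from this record; the credential helper, organization URL, personal access token, and the 'like' reaction value are placeholders or assumptions, not details confirmed by the record.

from msrest.authentication import BasicAuthentication  # assumed auth helper

# Placeholder values; 'like' as a CommentReactionType string is an assumption.
creds = BasicAuthentication('', 'personal-access-token')
client = WorkItemTrackingClient(
    base_url='https://dev.azure.com/your-org', creds=creds)

removed = client.delete_comment_reaction(
    project='MyProject', work_item_id=42, comment_id=7, reaction_type='like')
print(removed)  # deserialized CommentReaction describing the removed reaction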
google/clusterfuzz
src/clusterfuzz/_internal/base/retry.py
get_delay
python
def get_delay(num_try, delay, backoff):
    return delay * (backoff**(num_try - 1))
Compute backoff delay.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/base/retry.py#L32-L34
import functools
import inspect
import sys
import time

from clusterfuzz._internal.metrics import logs


def sleep(seconds):
    time.sleep(seconds)
Apache License 2.0
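get_delay above is plain exponential backoff: the first try waits `delay` seconds and each later try multiplies the wait by `backoff`. A quick check of the arithmetic, with the two-line function copied from the record so the snippet runs on its own and illustrative parameter values:

def get_delay(num_try, delay, backoff):
    # copied from the record above
    return delay * (backoff ** (num_try - 1))

# Base delay of 2 seconds, backoff factor of 2:
for num_try in (1, 2, 3, 4):
    print(num_try, get_delay(num_try, delay=2, backoff=2))
# 1 2
# 2 4
# 3 8
# 4 16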
dynatrace/dynatrace-cli
dtcli.py
parsePipelineInfo
python
def parsePipelineInfo(pipelineinfofile):
    pipelineinfo = None
    with open(pipelineinfofile) as json_data:
        pipelineinfo = json.load(json_data)
    return pipelineinfo
will parse the pipelineinfo file
https://github.com/dynatrace/dynatrace-cli/blob/4954a85fddce4db3723d1d5c9a0e5e5ba937003d/dtcli.py#L517-L524
import sys import io import re import os import json import time import datetime import operator import urllib import requests import urllib3 import uuid urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) API_ENDPOINT_APPLICATIONS = "/api/v1/entity/applications" API_ENDPOINT_SERVICES = "/api/v1/entity/services" API_ENDPOINT_PROCESS_GROUPS = "/api/v1/entity/infrastructure/process-groups" API_ENDPOINT_HOSTS = "/api/v1/entity/infrastructure/hosts" API_ENDPOINT_PROCESSES = "/api/v1/entity/infrastructure/processes" API_ENDPOINT_CUSTOM = "/api/v1/entity/infrastructure/custom" API_ENDPOINT_TIMESERIES = "/api/v1/timeseries" API_ENDPOINT_THRESHOLDS = "/api/v1/thresholds" API_ENDPOINT_EVENTS = "/api/v1/events" API_ENDPOINT_PROBLEMS = "/api/v1/problem" HTTP_GET = "GET" HTTP_POST = "POST" HTTP_PUT = "PUT" HTTP_DELETE = "DELETE" MONSPEC_PERFSIGNATURE = "perfsignature" MONSPEC_PERFSIGNATURE_TIMESERIES = "timeseries" MONSPEC_PERFSIGNATURE_AGGREGATE = "aggregate" MONSPEC_PERFSIGNATURE_SMARTSCAPE = "smartscape" MONSPEC_PERFSIGNATURE_METRICID = "metricId" MONSPEC_PERFSIGNATURE_METRICDEF = "metricDef" MONSPEC_PERFSIGNATURE_SOURCE = "source" MONSPEC_PERFSIGNATURE_COMPARE = "compare" MONSPEC_PERFSIGNATURE_THRESHOLD = "threshold" MONSPEC_PERFSIGNATURE_RESULT = "result" MONSPEC_PERFSIGNATURE_RESULT_COMPARE = "result_compare" MONSPEC_PERFSIGNATURE_UPPERLIMIT = "upperlimit" MONSPEC_PERFSIGNATURE_LOWERLIMIT = "lowerlimit" MONSPEC_DISPLAYNAME = "displayName" MONSPEC_METRICTYPE_SERVICE = "Monspec Service Metric" MONSPEC_METRICTYPE_SMARTSCAPE = "Monspec Smartscape Metric" MONSPEC_DATAHANDLING_NORMAL = 0 MONSPEC_DATAHANDLING_IGNORE_ERROR = 1 MONSPEC_DATAHANDLING_DEMODATA = -1 dtconfigfilename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dtconfig.json") config = { "tenanthost" : "smpljson", "apitoken" : "smpltoken", "cacheupdate" : -1, "cachedir" : "", "debug" : 0 } global_doPrint = False global_timestampcheck = datetime.datetime(2000, 1, 1, ).timestamp() def getAuthenticationHeader(): return {"Authorization" : "Api-Token " + config["apitoken"]} def getRequestUrl(apiEndpoint, queryString): requestUrl = config["tenanthost"] + apiEndpoint if(not requestUrl.startswith("https://")) : requestUrl = "https://" + requestUrl; if(queryString is not None and len(queryString) > 0): requestUrl += "?" 
+ queryString return requestUrl def getCacheFilename(apiEndpoint, queryString): cachedir = getAttributeOrNone(config, "cachdir") if cachedir is None or cachedir == "": cachedir = os.path.dirname(os.path.abspath(__file__)) fullCacheFilename = os.path.join(os.path.dirname(os.path.abspath(__file__)), cachedir, config["tenanthost"].replace(".", "_").replace(":","_"), apiEndpoint.replace("/","_")) if(queryString is not None and len(queryString) > 0): os.path.join(fullCacheFilename, urllib.parse.unquote(queryString).replace(".", "_").replace(":", "_").replace("?", "_").replace("&", "_")) fullCacheFilename += ".json" return fullCacheFilename def encodeString(strValue): encodedStrValue = strValue.replace(" ", "%20") return encodedStrValue class NameValue: def __init__(self, defaultName, defaultValue): self.name = defaultName if(defaultValue.startswith("[") and defaultValue.endswith("]")): json.load(defaultValue) else: self.value = defaultValue class TimeframeDef: def __init__(self, timeframe): self.timeframestr = timeframe if timeframe is None: self.timeframestr = [None] return self.type = [] self.timestamp = [] self.allowedConsts = ["hour", "2hours", "6hours", "day", "week", "month"] self.timeframestr = timeframe.split(":") for timeframe in self.timeframestr: if operator.contains(self.allowedConsts, timeframe): self.type.append("relative") elif timeframe.isdigit(): tsint = int(timeframe) if tsint < global_timestampcheck: self.timestamp.append(1000 * int(datetime.datetime.now().timestamp() - tsint*60)) else: self.timestamp.append(int(timeframe)) self.type.append("absolute") else: self.timestamp.append(None) self.type.append(None) def isTimerange(self): return self.isValid() and len(self.timeframestr) > 1 def getNowAsStringForWebUI(self): return str(1000*datetime.datetime.now().timestamp()) def timeframeAsStrForWebUI(self, frame=0): if self.isRelative(frame): webUIConsts = ["l_1_HOURS", "l_2_HOURS", "l_6_HOURS", "l_24_HOURS", "l_7_DAYS", "l_30_DAYS"] ix = operator.indexOf(self.allowedConsts, self.timeframestr[frame]) return webUIConsts[ix] else: return str(self.timestamp[frame]) def timeframeAsStr(self, frame=0): if self.isRelative(): return self.timeframestr[frame] return str(self.timestamp[frame]) def isValid(self, frame=0): return self.timeframestr[frame] is not None def isRelative(self, frame=0): return self.type[frame] == "relative" def isAbsolute(self, frame=0): return self.type[frame] == "absolute" def isNumeric(value): try: numValue = int(value) except: return False return True def parseNameValue(nameValue, defaultName, defaultValue): if(nameValue is None): return NameValue(defaultName, defaultValue) equalSign = nameValue.find("=") if(equalSign < 0): return NameValue(defaultName, nameValue) partitions = nameValue.partition("=") return NameValue(partitions[0], partitions[2]) def queryDynatraceAPI(isGet, apiEndpoint, queryString, postBody): if isGet : httpMethod = HTTP_GET else: httpMethod = HTTP_POST return queryDynatraceAPIEx(httpMethod, apiEndpoint, queryString, postBody) def queryDynatraceAPIEx(httpMethod, apiEndpoint, queryString, postBody): if (getAttributeOrDefault(config, "debug", 0) == 1) : print("DEBUG - queryDynatraceAPIEx: " + apiEndpoint + "?" 
+ queryString + " - BODY: " + str(postBody)) fullCacheFilename = getCacheFilename(apiEndpoint, queryString) if (getAttributeOrDefault(config, "debug", 0) == 1) : print("DEBUG - getCacheFilename: " + fullCacheFilename) readFromCache = False if(os.path.isfile(fullCacheFilename)): cacheupdate = getAttributeOrNone(config, "cacheupdate") if(cacheupdate is None): cacheupdate = -1 else: cacheupdate = int(config["cacheupdate"]) if(cacheupdate == -1): readFromCache = True if(cacheupdate > 0): now = datetime.datetime.now() lastModified = datetime.datetime.fromtimestamp(os.path.getmtime(fullCacheFilename)) if((now - lastModified).seconds < cacheupdate): readFromCache = True jsonContent = None if (httpMethod == HTTP_GET) and readFromCache: if (getAttributeOrDefault(config, "debug", 0) == 1) : print("Read from Cache!") with open(fullCacheFilename) as json_data: jsonContent = json.load(json_data) else: myResponse = None if httpMethod == HTTP_GET: myResponse = requests.get(getRequestUrl(apiEndpoint, queryString), headers=getAuthenticationHeader(), verify=False) elif httpMethod == HTTP_POST: myResponse = requests.post(getRequestUrl(apiEndpoint, queryString), headers=getAuthenticationHeader(), verify=False, json=postBody) elif httpMethod == HTTP_PUT: myResponse = requests.put(getRequestUrl(apiEndpoint, queryString), headers=getAuthenticationHeader(), verify=False, json=postBody) elif httpMethod == HTTP_DELETE: myResponse = requests.delete(getRequestUrl(apiEndpoint, queryString), headers=getAuthenticationHeader(), verify=False, json=postBody) if(myResponse.ok): if(len(myResponse.text) > 0): jsonContent = json.loads(myResponse.text) if (httpMethod == HTTP_GET) and jsonContent is not None: directory = os.path.dirname(fullCacheFilename) if not os.path.exists(directory): os.makedirs(directory) with open(fullCacheFilename, "w+") as output_file: json.dump(jsonContent, output_file) else: jsonContent = json.loads(myResponse.text) errorMessage = "" if(jsonContent["error"]): errorMessage = jsonContent["error"]["message"] if global_doPrint: print("Dynatrace API returned an error: " + errorMessage) jsonContent = None raise Exception("Error", "Dynatrace API returned an error: " + errorMessage) return jsonContent class KeySearch: def __init__(self, key): self.keylistname = None self.contextvalue = None self.contextkeyname = None self.keyvalue = None self.keykeyname = None self.value = None self.valuekeyname = "value" parts = key.partition("/") if(parts[1] == "/"): self.keylistname = parts[0] key = parts[2] parts = key.partition("?") if(parts[1] == "?"): self.valuekeyname = parts[2] key = parts[0] if(len(key) > 0): parts = key.partition(":") if(parts[1] == ":"): self.contextvalue = parts[0] self.contextkeyname = "context" self.keyvalue = parts[2] self.keykeyname = "key" else: if(self.keylistname is not None): self.keyvalue = parts[0] else: self.valuekeyname = parts[0] if(self.contextvalue is not None): parts = self.contextvalue.partition("#") if(parts[1] == "#"): self.contextkeyname = parts[0] self.contextvalue = parts[2] if(self.keyvalue is not None): parts = self.keyvalue.partition("#") if(parts[1] == "#"): self.keykeyname = parts[0] self.keyvalue = parts[2] def isTagSearch(): return self.keylistname is not None def getAttributeFromFirstMatch(attributeName, objectlist): attributeNames = attributeName.split(",") if attributeName == "*": i = len(objectlist)-1 while i>=0: if(objectlist[i] is not None): return objectlist[i] i = i-1; return None for obj in objectlist: try: attributeValues = [] if(obj is not None): for 
attribute in attributeNames: attributeValue = obj[attribute] if(attributeValue is not None): attributeValues.append(attributeValue) if len(attributeValues) == len(attributeNames): if(len(attributeValues) == 1): return attributeValues[0] return attributeValues except KeyError: x=1 return "OBJECT DOESNT HAVE KEY " + attributeName def jsonFindValuesByKey(jsonContent, key, matchValue, returnKey): return jsonFindValuesByKeyEx(jsonContent, key, matchValue, returnKey, None, None) def jsonFindValuesByKeyEx(jsonContent, key, matchValue, returnKey, parentJsonNodename, parentJsonContent): if((key is not None) and (type(key) == str)): key = KeySearch(key) if((matchValue is not None) and (type(matchValue) == str)): try: matchValue = re.compile(matchValue) except: if global_doPrint: print(matchValue + " is NOT VALID regular expression") raise Exception("Regex Error", matchValue + " is NOT VALID regular expression") result = [] if type(jsonContent) == str: jsonContent = json.loads(jsonContent) if type(jsonContent) is dict: foundValueMatch = None foundContextMatch = key.contextvalue is None foundKeyMatch = key.keyvalue is None for jsonkey in jsonContent: if ((type(jsonContent[jsonkey]) is list) and (jsonkey == key.keyvalue)): if(matchValue is None): foundKeyValueMatch = True else: for listItem in jsonContent[jsonkey]: if(matchValue.match(listItem)): foundKeyValueMatch = True if foundKeyValueMatch is not None: foundKeyValueMatch = getAttributeFromFirstMatch(returnKey, [jsonContent, parentJsonContent]) if ((type(jsonContent[jsonkey]) is list) and (jsonkey == key.valuekeyname)): for listitem in jsonContent[jsonkey]: if(matchValue.match(listitem)): result.append(getAttributeFromFirstMatch(returnKey, [jsonContent, parentJsonContent])) elif type(jsonContent[jsonkey]) in (list, dict): subResult = jsonFindValuesByKeyEx(jsonContent[jsonkey], key, matchValue, returnKey, jsonkey, jsonContent) if(len(subResult) > 0): result.extend(subResult) elif jsonkey == key.valuekeyname: if((jsonContent[jsonkey] is not None) and (matchValue is None or matchValue.match(jsonContent[jsonkey]))): foundValueMatch = getAttributeFromFirstMatch(returnKey, [jsonContent, parentJsonContent]) elif (key.contextvalue is not None) and (jsonkey == key.contextkeyname): foundContextMatch = key.contextvalue == jsonContent[jsonkey] elif (key.keyvalue is not None) and (jsonkey == key.keykeyname): foundKeyMatch = key.keyvalue == jsonContent[jsonkey] if (key.keylistname is None) or (key.keylistname == parentJsonNodename): if((foundValueMatch is not None) and foundContextMatch and foundKeyMatch): result.append(foundValueMatch) elif type(jsonContent) is list: for item in jsonContent: if type(item) in (list, dict): subResult = jsonFindValuesByKeyEx(item, key, matchValue, returnKey, parentJsonNodename, parentJsonContent) if(len(subResult) > 0): result.extend(subResult) return result def matchEntityName(entityName, listOfEntities): if(listOfEntities is None): return True if(type(listOfEntities) is str): return listOfEntities == entityName if(type(listOfEntities) is list): if(entityName in listOfEntities): return True return False def filterDataPointsForEntities(jsonDataPoints, entities): result = {} for entityDataPoint in jsonDataPoints: if matchEntityName(entityDataPoint, entities): result[entityDataPoint] = {} result[entityDataPoint]["dataPoints"] = jsonDataPoints[entityDataPoint] return result def handleException(e): errorObject = {} if e.args: if len(e.args) == 2: errorObject[e.args[0]] = e.args[1] if len(e.args) == 1: errorObject["error"] = e.args[0] 
else: errorObject["exception"] = e print(errorObject) sys.exit(1) def getAttributeOrDefault(baseobject, attributename, default): attributeValue = getAttributeOrNone(baseobject, attributename) if attributeValue is None: attributeValue = default return attributeValue def getAttributeOrNone(baseobject, attributename): attributeValue = None try : attributeValue = baseobject[attributename] except: attributeValue = None return attributeValue
Apache License 2.0
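parsePipelineInfo above simply loads a JSON file and returns the parsed object. A minimal sketch that writes a throwaway pipeline-info file and reads it back; the function body is copied from the record, while the file content and keys are made up for illustration (the real schema is not shown here).

import json
import tempfile

def parsePipelineInfo(pipelineinfofile):
    # copied from the record above
    pipelineinfo = None
    with open(pipelineinfofile) as json_data:
        pipelineinfo = json.load(json_data)
    return pipelineinfo

# Hypothetical pipeline-info content, for illustration only.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump({"stage": "staging", "build": "1.2.3"}, tmp)
    path = tmp.name

print(parsePipelineInfo(path))  # {'stage': 'staging', 'build': '1.2.3'}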
wildltr/ptranking
ptranking/data/data_utils.py
clip_query_data
python
def clip_query_data(qid, list_docids=None, feature_mat=None, std_label_vec=None,
                    binary_rele=False, unknown_as_zero=False, clip_query=None,
                    min_docs=None, min_rele=1, presort=None):
    if binary_rele:
        std_label_vec = np.clip(std_label_vec, a_min=-10, a_max=1)
    if unknown_as_zero:
        std_label_vec = np.clip(std_label_vec, a_min=0, a_max=10)

    if clip_query:
        if feature_mat.shape[0] < min_docs:
            return None
        if (std_label_vec > 0).sum() < min_rele:
            return None

    assert presort is not None
    if presort:
        des_inds = np_arg_shuffle_ties(std_label_vec, descending=True)
        feature_mat, std_label_vec = feature_mat[des_inds], std_label_vec[des_inds]

    return (qid, feature_mat, std_label_vec)
Clip the data associated with the same query if required
https://github.com/wildltr/ptranking/blob/8f54be4dbfa0b0aba4c9c80b647ddbe7e571cf26/ptranking/data/data_utils.py#L406-L435
import os import random import numpy as np from pathlib import Path from enum import Enum, unique, auto from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler import torch import torch.utils.data as data from ptranking.ltr_adhoc.util.bin_utils import batch_count from ptranking.utils.numpy.np_extensions import np_arg_shuffle_ties from ptranking.ltr_adhoc.util.one_hot_utils import get_one_hot_reprs from ptranking.utils.bigdata.BigPickle import pickle_save, pickle_load MSLETOR_SEMI = ['MQ2007_Semi', 'MQ2008_Semi'] MSLETOR_LIST = ['MQ2007_List', 'MQ2008_List'] MSLETOR_SUPER = ['MQ2007_Super', 'MQ2008_Super'] MSLETOR = ['MQ2007_Super', 'MQ2008_Super', 'MQ2007_Semi', 'MQ2008_Semi', 'MQ2007_List', 'MQ2008_List'] IRGAN_MQ2008_SEMI = ['IRGAN_MQ2008_Semi'] MSLRWEB = ['MSLRWEB10K', 'MSLRWEB30K'] YAHOO_LTR = ['Set1', 'Set2'] YAHOO_LTR_5Fold = ['5FoldSet1', '5FoldSet2'] ISTELLA_LTR = ['Istella_S', 'Istella', 'Istella_X'] ISTELLA_MAX = 1000000 GLTR_LIBSVM = ['LTR_LibSVM', 'LTR_LibSVM_K'] GLTR_LETOR = ['LETOR', 'LETOR_K'] SCALER_LEVEL = ['QUERY', 'DATASET'] SCALER_ID = ['MinMaxScaler', 'RobustScaler', 'StandardScaler', "SLog1P"] @unique class MASK_TYPE(Enum): rand_mask_all = auto() rand_mask_rele = auto() @unique class LABEL_TYPE(Enum): MultiLabel = auto() Permutation = auto() @unique class SPLIT_TYPE(Enum): Train = auto() Test = auto() Validation = auto() class SymmetricLog1pScaler(object): @staticmethod def fit_transform(X): return np.sign(X) * np.log(1.0 + np.abs(X)) def get_data_meta(data_id=None): if data_id in MSLRWEB: max_rele_level = 4 label_type = LABEL_TYPE.MultiLabel num_features = 136 has_comment = False fold_num = 5 elif data_id in MSLETOR_SUPER: max_rele_level = 2 label_type = LABEL_TYPE.MultiLabel num_features = 46 has_comment = True fold_num = 5 elif data_id in MSLETOR_SEMI: max_rele_level = 2 label_type = LABEL_TYPE.MultiLabel num_features = 46 has_comment = True fold_num = 5 elif data_id in MSLETOR_LIST: max_rele_level = None label_type = LABEL_TYPE.Permutation num_features = 46 has_comment = True fold_num = 5 elif data_id in YAHOO_LTR: max_rele_level = 4 label_type = LABEL_TYPE.MultiLabel num_features = 700 has_comment = False fold_num = 1 elif data_id in YAHOO_LTR_5Fold: max_rele_level = 4 label_type = LABEL_TYPE.MultiLabel num_features = 700 has_comment = False fold_num = 5 elif data_id in ISTELLA_LTR: max_rele_level = 4 label_type = LABEL_TYPE.MultiLabel num_features = 220 fold_num = 1 if data_id in ['Istella_S', 'Istella']: has_comment = False else: has_comment = True else: raise NotImplementedError data_meta = dict(num_features=num_features, has_comment=has_comment, label_type=label_type, max_rele_level=max_rele_level, fold_num=fold_num) return data_meta def get_scaler(scaler_id): assert scaler_id in SCALER_ID if scaler_id == 'MinMaxScaler': scaler = MinMaxScaler() elif scaler_id == 'RobustScaler': scaler = RobustScaler() elif scaler_id == 'StandardScaler': scaler = StandardScaler() elif scaler_id == 'SLog1P': scaler = SymmetricLog1pScaler() return scaler def get_scaler_setting(data_id, grid_search=False, scaler_id=None): if grid_search: if scaler_id is None: if data_id in MSLRWEB or data_id in ISTELLA_LTR: choice_scale_data = [True] choice_scaler_id = ['StandardScaler'] choice_scaler_level = ['QUERY'] else: choice_scale_data = [False] choice_scaler_id = [None] choice_scaler_level = [None] else: choice_scale_data = [True] choice_scaler_id = [scaler_id] choice_scaler_level = ['QUERY'] return choice_scale_data, choice_scaler_id, choice_scaler_level else: if 
scaler_id is None: if data_id in MSLRWEB or data_id in ISTELLA_LTR: scale_data = True scaler_id = 'StandardScaler' scaler_level = 'QUERY' else: scale_data = False scaler_id = None scaler_level = None else: scale_data = True scaler_level = 'QUERY' return scale_data, scaler_id, scaler_level def get_buffer_file_name(data_id, file, data_dict, presort=None): min_rele = data_dict['min_rele'] if min_rele is not None and min_rele > 0: fi_suffix = '_'.join(['MiR', str(min_rele)]) else: fi_suffix = '' min_docs = data_dict['min_docs'] if min_docs is not None and min_docs > 0: if len(fi_suffix)>0: fi_suffix = '_'.join([fi_suffix, 'MiD', str(min_docs)]) else: fi_suffix = '_'.join(['MiD', str(min_docs)]) res_suffix = '' if data_dict['binary_rele']: res_suffix += '_B' if data_dict['unknown_as_zero']: res_suffix += '_UO' pq_suffix = '_'.join([fi_suffix, 'PerQ']) if len(fi_suffix) > 0 else 'PerQ' assert presort is not None if presort: pq_suffix = '_'.join([pq_suffix, 'PreSort']) scale_data = data_dict['scale_data'] scaler_id = data_dict['scaler_id'] if 'scaler_id' in data_dict else None scaler_level = data_dict['scaler_level'] if 'scaler_level' in data_dict else None if scale_data: assert scaler_id is not None and scaler_id in SCALER_ID and scaler_level in SCALER_LEVEL if 'DATASET' == scaler_level: pq_suffix = '_'.join([pq_suffix, 'DS', scaler_id]) else: pq_suffix = '_'.join([pq_suffix, 'QS', scaler_id]) if data_id in YAHOO_LTR: perquery_file = file[:file.find('.txt')].replace(data_id.lower() + '.', 'Buffered' + data_id + '/') + '_' + pq_suffix + res_suffix + '.np' elif data_id in ISTELLA_LTR: perquery_file = file[:file.find('.txt')].replace(data_id, 'Buffered_' + data_id) + '_' + pq_suffix + res_suffix + '.np' else: perquery_file = file[:file.find('.txt')].replace('Fold', 'BufferedFold') + '_' + pq_suffix + res_suffix +'.np' return perquery_file def _parse_docid(comment): parts = comment.strip().split() return parts[2] def _parse_qid_tok(tok): assert tok.startswith('qid:') return tok[4:] def iter_lines(lines, has_targets=True, one_indexed=True, missing=0.0, has_comment=False): for line in lines: if has_comment: data, _, comment = line.rstrip().partition('#') toks = data.split() else: toks = line.rstrip().split() num_features = 0 feature_vec = np.repeat(missing, 8) std_score = -1.0 if has_targets: std_score = float(toks[0]) toks = toks[1:] qid = _parse_qid_tok(toks[0]) for tok in toks[1:]: fid, _, val = tok.partition(':') fid = int(fid) val = float(val) if one_indexed: fid -= 1 assert fid >= 0 while len(feature_vec) <= fid: orig = len(feature_vec) feature_vec.resize(len(feature_vec) * 2) feature_vec[orig:orig * 2] = missing feature_vec[fid] = val num_features = max(fid + 1, num_features) assert num_features > 0 feature_vec.resize(num_features) if has_comment: yield (feature_vec, std_score, qid, comment) else: yield (feature_vec, std_score, qid) def parse_letor(source, has_targets=True, one_indexed=True, missing=0.0, has_comment=False): max_width = 0 feature_vecs, std_scores, qids = [], [], [] if has_comment: comments = [] it = iter_lines(source, has_targets=has_targets, one_indexed=one_indexed, missing=missing, has_comment=has_comment) if has_comment: for f_vec, s, qid, comment in it: feature_vecs.append(f_vec) std_scores.append(s) qids.append(qid) comments.append(comment) max_width = max(max_width, len(f_vec)) else: for f_vec, s, qid in it: feature_vecs.append(f_vec) std_scores.append(s) qids.append(qid) max_width = max(max_width, len(f_vec)) assert max_width > 0 all_features_mat = 
np.ndarray((len(feature_vecs), max_width), dtype=np.float64) all_features_mat.fill(missing) for i, x in enumerate(feature_vecs): all_features_mat[i, :len(x)] = x all_labels_vec = np.array(std_scores) if has_comment: docids = [_parse_docid(comment) for comment in comments] return all_features_mat, all_labels_vec, qids, docids else: return all_features_mat, all_labels_vec, qids
MIT License
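A small sketch of calling clip_query_data above on a toy query, assuming the function (and numpy) are importable as shown in this record. presort=False sidesteps the np_arg_shuffle_ties dependency, and the thresholds and arrays are illustrative only.

import numpy as np

# Assumes clip_query_data from ptranking/data/data_utils.py (shown above).
feature_mat = np.random.rand(5, 3)          # 5 documents, 3 features
std_label_vec = np.array([2, 0, 1, 0, 0])   # graded relevance labels

kept = clip_query_data(qid="q1", feature_mat=feature_mat,
                       std_label_vec=std_label_vec,
                       clip_query=True, min_docs=3, min_rele=1,
                       presort=False)
print(kept is not None)   # True: 5 >= 3 docs and two relevant labels

dropped = clip_query_data(qid="q2", feature_mat=feature_mat,
                          std_label_vec=np.zeros(5),
                          clip_query=True, min_docs=3, min_rele=1,
                          presort=False)
print(dropped)            # None: no relevant documents, so the query is clipped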
netromdk/slacker
slacker/commands/command.py
Command.name
python
def name(self):
    pass
Returns the name of the command. This is the actual command, like 'download'.
https://github.com/netromdk/slacker/blob/56ab630ba11451c254c5ec377f76033b692c61ce/slacker/commands/command.py#L32-L34
import re
from cachetools import TTLCache
from abc import ABC, abstractmethod
from slacker.logger import Logger
from slacker.slack_api import SlackAPI
from prompt_toolkit.completion import WordCompleter

COMMAND_NAME_REGEX = re.compile("([\w\d][\w\d\.]*)?[\w\d]+")


class Command(ABC):
    def __init__(self):
        self.__validate()
        self.logger = Logger(self.__class__.__name__).get()
        self.cache = None
        if not self.is_destructive() and self.use_cache():
            ttl = self.cache_ttl()
            max_items = self.max_items_in_cache()
            self.logger.debug("Created {} command cache (maxitems={}, ttl={})"
                              .format(self.name(), max_items, ttl))
            self.cache = TTLCache(max_items, ttl)

    @abstractmethod
MIT License
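Command.name above is an abstract method: each concrete command overrides it to return the command string such as 'download'. A standalone illustration of that pattern follows; BaseCommand and DownloadCommand are hypothetical stand-ins, not slacker's actual classes (whose __init__ depends on helpers not fully shown here).

from abc import ABC, abstractmethod

class BaseCommand(ABC):          # hypothetical stand-in, not slacker's Command
    @abstractmethod
    def name(self):
        pass

class DownloadCommand(BaseCommand):
    def name(self):
        return "download"

print(DownloadCommand().name())  # download
# Instantiating BaseCommand() directly would raise TypeError, because the
# abstract method name() has no implementation there.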
neuropower/neurodesign
source/src/neurodesign.py
experiment.countstim
python
def countstim(self):
    self.trial_duration = self.stim_duration + self.t_pre + self.t_post
    if self.ITImodel == "uniform":
        self.ITImean = (self.ITImax + self.ITImin) / 2
    if self.duration:
        if not self.restnum == 0:
            blockdurNR = self.restnum * (self.ITImean + self.trial_duration)
            blockdurWR = blockdurNR + self.restdur
            blocknum = np.floor(self.duration / blockdurWR)
            n_trials = blocknum * self.restnum
            remain = self.duration - (blocknum * blockdurWR)
            if remain >= blockdurNR:
                n_trials = n_trials + self.restnum
            else:
                extratrials = np.floor(
                    remain / (self.ITImean + self.trial_duration))
                n_trials = n_trials + extratrials
            self.n_trials = int(n_trials)
        else:
            self.n_trials = int(
                self.duration / (self.ITImean + self.trial_duration))
    else:
        ITIdur = self.n_trials * self.ITImean
        TRIALdur = self.n_trials * self.trial_duration
        duration = ITIdur + TRIALdur
        if self.restnum > 0:
            duration = duration + \
                (np.floor(self.n_trials / self.restnum) * self.restdur)
        self.duration = duration
Function to compute some arguments depending on other arguments.
https://github.com/neuropower/neurodesign/blob/605b97a616b53f4e9ea767460471fc7c8d9bdd77/source/src/neurodesign.py#L434-L470
from __future__ import division from . import msequence, generate, report from numpy import transpose as t from scipy.special import gamma from collections import Counter from numpy.linalg import inv from scipy import linalg import sklearn.cluster import scipy.linalg import pandas as pd import progressbar import numpy as np import itertools import StringIO import warnings import zipfile import shutil import scipy import copy import time import math import sys import os class design(object): def __init__(self, order, ITI, experiment, onsets=None): self.order = order self.ITI = ITI self.onsets = onsets self.Fe = 0 self.Fd = 0 self.experiment = experiment if not len(self.ITI) == experiment.n_trials: raise ValueError( "length of design (ITI's) does not comply with experiment") if not len(self.order) == experiment.n_trials: raise ValueError( "length of design (orders) does not comply with experiment") def check_maxrep(self, maxrep): for stim in range(self.experiment.n_stimuli): repcheck = not ''.join( str(e) for e in [stim] * maxrep) in ''.join(str(e) for e in self.order) if repcheck == False: break return repcheck def check_hardprob(self): obscnt = Counter(self.order).values() obsprob = np.round(obscnt / np.sum(obscnt), decimals=2) if not len(self.experiment.P) == len(obsprob): return False close = np.isclose(np.array(self.experiment.P), np.array(obsprob), atol=0.001) if not np.sum(close) == len(obsprob): return False return True def crossover(self, other, seed=1234): assert len(self.order) == len(other.order) np.random.seed(seed) changepoint = np.random.choice(len(self.order), 1)[0] offspringorder1 = list(self.order)[ :changepoint] + list(other.order)[changepoint:] offspringorder2 = list(other.order)[ :changepoint] + list(self.order)[changepoint:] offspring1 = design(order=offspringorder1, ITI=self.ITI, experiment=self.experiment) offspring2 = design(order=offspringorder2, ITI=other.ITI, experiment=self.experiment) return [offspring1, offspring2] def mutation(self, q, seed=1234): np.random.seed(seed) mut_ind = np.random.choice(len(self.order), int( len(self.order) * q), replace=False) mutated = copy.copy(self.order) for mut in mut_ind: np.random.seed(seed) mut_stim = np.random.choice( self.experiment.n_stimuli, 1, replace=True)[0] mutated[mut] = mut_stim offspring = design(order=mutated, ITI=self.ITI, experiment=self.experiment) return offspring def designmatrix(self): if self.experiment.restnum > 0: orderli = list(self.order) ITIli = list(self.ITI) for x in np.arange(0, self.experiment.n_trials, self.experiment.restnum)[1:][::-1]: orderli.insert(x, "R") ITIli.insert(x, self.experiment.restdur) ITIli = [y+self.experiment.trial_duration if not x == "R" else y for x, y in zip(orderli, ITIli)] onsets = np.cumsum(ITIli)-self.experiment.trial_duration self.onsets = [y for x, y in zip(orderli, onsets) if not x == "R"] else: ITIli = np.array(self.ITI) + self.experiment.trial_duration self.onsets = np.cumsum(ITIli) - self.experiment.trial_duration stimonsets = [x + self.experiment.t_pre for x in self.onsets] self.ITI, x = _round_to_resolution(self.ITI,self.experiment.resolution) onsetX, XindStim = _round_to_resolution(stimonsets,self.experiment.resolution) stim_duration_tp = int( self.experiment.stim_duration / self.experiment.resolution) assert(np.max(XindStim) <= self.experiment.n_tp) assert(np.max(XindStim)+stim_duration_tp <= self.experiment.n_tp) X_X = np.zeros([self.experiment.n_tp, self.experiment.n_stimuli]) for stimulus in xrange(self.experiment.n_stimuli): for dur in 
xrange(stim_duration_tp): X_X[np.array(XindStim) + dur, int(stimulus) ] = [1 if z == stimulus else 0 for z in self.order] deconvM = np.zeros([self.experiment.n_tp, int( self.experiment.laghrf * self.experiment.n_stimuli)]) for stim in xrange(self.experiment.n_stimuli): for j in xrange(int(self.experiment.laghrf)): deconvM[j:, self.experiment.laghrf * stim + j] = X_X[:(self.experiment.n_tp - j), stim] idxX = [int(x) for x in np.arange(0, self.experiment.n_tp, self.experiment.TR / self.experiment.resolution)] if len(idxX)-self.experiment.white.shape[0]==1: idxX = idxX[:self.experiment.white.shape[0]] deconvMdown = deconvM[idxX, :] Xwhite = np.dot( np.dot(t(deconvMdown), self.experiment.white), deconvMdown) X_Z = np.zeros([self.experiment.n_tp, self.experiment.n_stimuli]) for stim in range(self.experiment.n_stimuli): X_Z[:, stim] = deconvM[:, (stim * self.experiment.laghrf):( (stim + 1) * self.experiment.laghrf)].dot(self.experiment.basishrf) X_Z = X_Z[idxX, :] X_X = X_X[idxX, :] Zwhite = t(X_Z) * self.experiment.white * X_Z self.X = Xwhite self.Z = Zwhite self.Xconv = X_Z self.Xnonconv = X_X self.CX = self.experiment.CX self.C = self.experiment.C return self def FeCalc(self, Aoptimality=True): try: invM = scipy.linalg.inv(self.X) except scipy.linalg.LinAlgError: try: invM = scipy.linalg.pinv(self.X) except np.linalg.linalg.LinAlgError: invM = np.nan sys.exc_clear() invM = np.array(invM) st1 = np.dot(self.CX, invM) CMC = np.dot(st1, t(self.CX)) if Aoptimality == True: self.Fe = float(self.CX.shape[0] / np.matrix.trace(CMC)) else: self.Fe = float(np.linalg.det(CMC)**(-1 / len(self.C))) self.Fe = self.Fe / self.experiment.FeMax return self def FdCalc(self, Aoptimality=True): try: invM = scipy.linalg.inv(self.Z) except scipy.linalg.LinAlgError: try: invM = scipy.linalg.pinv(self.Z) except np.linalg.linalg.LinAlgError: invM = np.nan sys.exc_clear() invM = np.array(invM) CMC = np.matrix(self.C) * invM * np.matrix(t(self.C)) if Aoptimality == True: self.Fd = float(len(self.C) / np.matrix.trace(CMC)) else: self.Fd = float(np.linalg.det(CMC)**(-1 / len(self.C))) self.Fd = self.Fd / self.experiment.FdMax return self def FcCalc(self, confoundorder=3): Q = np.zeros([self.experiment.n_stimuli, self.experiment.n_stimuli, confoundorder]) for n in xrange(len(self.order)): for r in np.arange(1, confoundorder + 1): if n > (r - 1): Q[self.order[n], self.order[n - r], r - 1] += 1 Qexp = np.zeros([self.experiment.n_stimuli, self.experiment.n_stimuli, confoundorder]) for si in xrange(self.experiment.n_stimuli): for sj in xrange(self.experiment.n_stimuli): for r in np.arange(1, confoundorder + 1): Qexp[si, sj, r - 1] = self.experiment.P[si] * self.experiment.P[sj] * (self.experiment.n_trials + 1) Qmatch = np.sum(abs(Q - Qexp)) self.Fc = Qmatch self.Fc = 1 - self.Fc / self.experiment.FcMax return self def FfCalc(self): trialcount = Counter(self.order) Pobs = [trialcount[x] for x in xrange(self.experiment.n_stimuli)] self.Ff = np.sum(abs(np.array( Pobs) - np.array(self.experiment.n_trials * np.array(self.experiment.P)))) self.Ff = 1 - self.Ff / self.experiment.FfMax return self def FCalc(self, weights,Aoptimality=True,confoundorder=3): if weights[0]>0: self.FeCalc(Aoptimality) if weights[1]>0: self.FdCalc(Aoptimality) self.FfCalc() self.FcCalc(confoundorder) matr = np.array([self.Fe, self.Fd, self.Ff, self.Fc]) self.F = np.sum(weights * matr) return self class experiment(object): def __init__(self, TR, P, C, rho, stim_duration, n_stimuli, ITImodel=None, ITImin=None, ITImax=None, ITImean=None, restnum=0, restdur=0, 
t_pre=0, t_post=0, n_trials=None, duration=None, resolution=0.1, FeMax=1, FdMax=1, FcMax=1, FfMax=1, maxrep=None, hardprob=False, confoundorder=3): self.TR = TR self.P = P self.C = np.array(C) self.rho = rho self.n_stimuli = n_stimuli self.t_pre = t_pre self.t_post = t_post self.n_trials = n_trials self.duration = duration self.resolution = resolution self.stim_duration = stim_duration self.maxrep = maxrep self.hardprob = hardprob self.confoundorder = confoundorder self.ITImodel = ITImodel self.ITImin = ITImin self.ITImean = ITImean self.ITImax = ITImax self.ITIlam = None self.restnum = restnum self.restdur = restdur self.FeMax = FeMax self.FdMax = FdMax self.FcMax = FcMax self.FfMax = FfMax if not np.isclose(self.TR % self.resolution, 0): self.resolution = _find_new_resolution(self.TR,self.resolution) warnings.warn("Warning: the resolution is adjusted to be a multiple of the TR. New resolution: %f"%self.resolution) self.countstim() self.CreateTsComp() self.CreateLmComp() self.max_eff() def max_eff(self): NulDesign = design( order=[np.argmin(self.P)] * self.n_trials, ITI=[0]+[self.ITImean] * (self.n_trials-1), experiment=self ) NulDesign.designmatrix() NulDesign.FcCalc(self.confoundorder) self.FcMax = 1 - NulDesign.Fc NulDesign.FfCalc() self.FfMax = 1 - NulDesign.Ff return self
MIT License
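In the no-rest branch of countstim above, the trial count is just the total duration divided by the sum of the mean ITI and the trial duration. A worked numeric example in plain Python, independent of the experiment class; all numbers are illustrative, not taken from the record.

# Illustrative inputs for an experiment with a uniform ITI model and no rests.
stim_duration, t_pre, t_post = 1.0, 0.5, 0.5
ITImin, ITImax = 2.0, 4.0
duration = 300.0            # total run length in seconds, restnum == 0

trial_duration = stim_duration + t_pre + t_post        # 2.0
ITImean = (ITImax + ITImin) / 2                        # 3.0
n_trials = int(duration / (ITImean + trial_duration))  # 300 / 5 = 60
print(trial_duration, ITImean, n_trials)               # 2.0 3.0 60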
avatartwo/avatar2
avatar2/protocols/unicorn_protocol.py
UnicornProtocol._worker_emu_start
python
def _worker_emu_start(self, single_step=False):
    self._worker_queue.put(UnicornWorkerEmuStartMessage(single_step))
Start the emulation inside the worker.
https://github.com/avatartwo/avatar2/blob/86a824072ef991a3a240688600f109eec8ad1ff7/avatar2/protocols/unicorn_protocol.py#L305-L307
import sys if sys.version_info < (3, 0): import Queue as queue else: import queue import struct import unicorn import logging from threading import Thread from collections import namedtuple from avatar2.message import UpdateStateMessage, RemoteMemoryReadMessage, RemoteMemoryWriteMessage, BreakpointHitMessage from avatar2.targets import TargetStates from avatar2.archs.arm import ARM class UnicornBreakpoint(object): __slots__ = ('hooks', 'temporary', 'ignore_count') def __init__(self, hooks, temporary=False, ignore_count=0): self.hooks = hooks self.temporary = temporary self.ignore_count = ignore_count UnicornWorkerEmuStartMessage = namedtuple('UnicornWorkerEmuStartMessage', ('single_step',)) UnicornWorkerUpdateStateMessage = namedtuple('UnicornWorkerUpdateStateMessage', ('state',)) UnicornWorkerBreakpointMessage = namedtuple('UnicornWorkerBreakpointMessage', ('bkptno', 'address')) class UnicornProtocol(object): def __init__(self, avatar, arch=ARM, origin=None): self.uc = unicorn.Uc(arch.unicorn_arch, arch.unicorn_mode) self.log = logging.getLogger((origin.log.name + '.' if origin is not None else '') + self.__class__.__name__) self.arch = arch self.pending_bp = set() self._avatar_queue = avatar.queue self._avatar_fast_queue = avatar.fast_queue self._origin = origin self._breakpoints = [] self._rmp_queue = queue.Queue() self._alive = True for start, end, mr in avatar.memory_ranges: perms = unicorn.UC_PROT_NONE if 'r' in mr.permissions: perms |= unicorn.UC_PROT_READ if 'w' in mr.permissions: perms |= unicorn.UC_PROT_WRITE if 'x' in mr.permissions: perms |= unicorn.UC_PROT_EXEC self.uc.mem_map(start, end - start, perms=perms) if hasattr(mr, 'file') and mr.file is not None: with open(mr.file, 'rb') as data: self.uc.mem_write(start, data.read()) if mr.forwarded: self.uc.hook_add(unicorn.UC_HOOK_MEM_VALID, self._forward_hook, begin=start, end=end) self._avatar_fast_queue.put(UpdateStateMessage(self._origin, TargetStates.INITIALIZED)) self._worker_queue = queue.Queue() self._worker_queue.put(UnicornWorkerUpdateStateMessage(TargetStates.STOPPED)) self._worker = UnicornWorker(self._origin, self, self.uc, self._worker_queue, self._avatar_fast_queue) self._worker.start() def __del__(self): self.shutdown() def shutdown(self): if self._alive: self._worker_queue.put(None) self.stop() self._worker.join() self._alive = False def cont(self): self._worker_emu_start() def stop(self): self._worker_emu_stop() def step(self): self._worker_emu_start(single_step=True) def set_breakpoint(self, line, hardware=True, temporary=False, regex=False, condition=None, ignore_count=0, thread=0): if not hardware: self.log.warning('Software breakpoints are not supported, falling back to hardware') if regex: self.log.warning('Regex breakpoints are not supported, ignoring regex') if condition is not None: self.log.warning('Conditional breakpoints are not supported, ignoring condition') if thread: self.log.warning('Thread-specific breakpoints are not supported, ignoring thread') bkptno = len(self._breakpoints) hook = self.uc.hook_add(unicorn.UC_HOOK_CODE, self._breakpoint_hook, begin=line, end=line, user_data=bkptno) self._breakpoints.append(UnicornBreakpoint(hooks=[hook], temporary=temporary, ignore_count=ignore_count)) return bkptno def set_watchpoint(self, variable, write=True, read=False): bkptno = len(self._breakpoints) hooks = [] if write is True: hooks.append(self.uc.hook_add(unicorn.UC_HOOK_MEM_WRITE, self._watchpoint_hook, begin=variable, end=variable, user_data=bkptno)) if read is True: 
hooks.append(self.uc.hook_add(unicorn.UC_HOOK_MEM_READ, self._watchpoint_hook, begin=variable, end=variable, user_data=bkptno)) self._breakpoints.append(UnicornBreakpoint(hooks=hooks)) return bkptno def remove_breakpoint(self, bkptno): for hook in self._breakpoints[bkptno].hooks: self.uc.hook_del(hook) self._breakpoints[bkptno] = None def read_memory(self, address, wordsize, num_words=1, raw=False): raw_mem = self.uc.mem_read(address, wordsize * num_words) if raw: return raw_mem num2fmt = {1: 'B', 2: 'H', 4: 'I', 8: 'Q'} fmt = '<{}{}'.format(num_words, num2fmt[wordsize]) mem = struct.unpack(fmt, raw_mem) return mem[0] if num_words == 1 else mem def write_memory(self, address, wordsize, val, num_words=1, raw=False): if raw: raw_mem = val else: num2fmt = {1: 'B', 2: 'H', 4: 'I', 8: 'Q'} fmt = '<{}{}'.format(num_words, num2fmt[wordsize]) if num_words == 1: raw_mem = struct.pack(fmt, val) else: raw_mem = struct.pack(fmt, *val) try: self.uc.mem_write(address, raw_mem) return True except unicorn.UcError: self.log.debug('Failed memory write @ 0x{:x}'.format(address)) return False def write_register(self, reg, value): self.uc.reg_write(self.arch.unicorn_registers[reg], value) def read_register(self, reg): return self.uc.reg_read(self.arch.unicorn_registers[reg]) def send_response(self, id, value, success): self._rmp_queue.put((value, success)) return True def _forward_hook(self, uc, access, address, size, value, user_data): pc = self.read_register(self.arch.pc_name) if access == unicorn.UC_MEM_READ or access == unicorn.UC_MEM_FETCH: msg = RemoteMemoryReadMessage(self._origin, 0, pc, address, size) write_back = True elif access == unicorn.UC_MEM_WRITE: msg = RemoteMemoryWriteMessage(self._origin, 0, pc, address, value, size) write_back = False else: raise ValueError('Forward hook with unknown access {}'.format(access)) self._avatar_queue.put(msg) value, success = self._rmp_queue.get() if not success: self.log.debug('Remote memory request returned 0x{:x}'.format(value)) elif write_back and not self.write_memory(address, size, value): self.log.debug('Failed to write back remote memory') def _breakpoint_hook(self, uc, address, size, bkptno): if bkptno in self.pending_bp: return bp = self._breakpoints[bkptno] if bp.ignore_count > 0: bp.ignore_count -= 1 return self.pending_bp.add(bkptno) self._worker_queue.put(UnicornWorkerBreakpointMessage(bkptno, address)) self.uc.emu_stop() if bp.temporary: self.remove_breakpoint(bkptno) def _watchpoint_hook(self, uc, access, address, size, value, bkptno): if bkptno in self.pending_bp: return self.pending_bp.add(bkptno) self.stop()
Apache License 2.0
gilch/drython
drython/core.py
identity
python
def identity(x): return x
The identity function. Returns its argument; not to be confused with the id() builtin. >>> identity('foo') 'foo'
https://github.com/gilch/drython/blob/eb1773c14060e31e2544f5fb69dd31621d0bc291/drython/core.py#L265-L272
from __future__ import absolute_import, division, print_function from abc import ABCMeta, abstractmethod from collections import Mapping import sys from itertools import islice, chain from functools import wraps if sys.version_info[0] == 2: from itertools import izip_longest as zip_longest else: from itertools import zip_longest Print = print _exclude_from__all__ = set(globals().keys()) __test__ = {} def _private(): class EmptyType(Mapping, tuple): __slots__ = () __len__ = tuple.__len__ __iter__ = tuple.__iter__ def __new__(cls, *args, **kwargs): return tuple.__new__(cls) def __init__(self): tuple.__init__(self) def __getitem__(self, key): raise KeyError(key) def __repr__(self): return 'Empty' def __eq__(self, other): if other == set() or other == {}: return True else: return tuple.__eq__(self, other) def __ne__(self, other): return not self == other def __hash__(self): return 0 __test__[EmptyType.__name__] = EmptyType.__doc__ res = EmptyType() def __init__(self): raise TypeError("cannot create 'EmptyType' instances") EmptyType.__init__ = __init__ return res Empty = _private() del _private def star(func): return lambda arg: func(*arg) def unstar(func): return lambda *args: func(args) def stars(func): return lambda kwargs: func(**kwargs) def unstars(func): return lambda **kwargs: func(kwargs) def allstars(func): return lambda args, kwargs: func(*args,**kwargs) _sentinel = object() def partition(iterable, n=2, step=None, fillvalue=_sentinel): step = step or n slices = (islice(iterable, start, None, step) for start in range(n)) if fillvalue is _sentinel: return zip(*slices) else: return zip_longest(*slices, fillvalue=fillvalue) def interleave(*iterables): return chain.from_iterable(zip(*iterables))
Apache License 2.0
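A brief usage sketch for the identity function above; the import path drython.core follows the record's function_path and is assumed to be importable as shown.

from drython.core import identity

# identity returns its argument unchanged, which makes it a convenient
# default callback or placeholder transform.
assert identity('foo') == 'foo'
assert identity(42) == 42

# e.g. as a do-nothing key function:
print(sorted(['b', 'a', 'c'], key=identity))   # ['a', 'b', 'c']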
christophreich1996/toeffipy
autograd/nn/functional.py
cross_entropy_loss
python
def cross_entropy_loss(prediction: Tensor, label: Tensor, reduction: str = 'mean') -> Tensor: assert label.shape == prediction.shape, 'Shape of label must match with prediction' loss = - (label * autograd.log(prediction)) return _apply_reduction(tensor=loss, reduction=reduction)
Function implements the multi-class cross entropy loss in autograd :param prediction: (Tensor) Prediction tensor :param label: (Tensor) One-hot encoded label tensor :param reduction: (str) Type of reduction to perform after applying the loss (mean, sum or none) :return: (Tensor) Loss value
https://github.com/christophreich1996/toeffipy/blob/34ca9cd97a488cdc58d2b909ba963edb80ae2b76/autograd/nn/functional.py#L735-L748
from typing import List, Union, Tuple, Optional import autograd from autograd import Tensor from autograd.tensor import Dependency import numpy as np def _conv_2d_core(input: np.ndarray, kernel: np.ndarray) -> np.ndarray: input = input.transpose((0, 2, 3, 1)) kernel = kernel.transpose((2, 3, 1, 0)) input = np.lib.stride_tricks.as_strided(input, (input.shape[0], input.shape[1] - kernel.shape[0] + 1, input.shape[2] - kernel.shape[1] + 1, kernel.shape[0], kernel.shape[1], input.shape[3]), input.strides[:3] + input.strides[1:]) return np.tensordot(input, kernel, axes=3).transpose((0, 3, 1, 2)) def conv_2d(input: Tensor, kernel: Tensor, bias: Optional[Tensor] = None) -> Tensor: assert input.data.ndim == 4, 'Input tensor must have four dimensions.' assert kernel.data.ndim == 4, 'Kernel tensor must have four dimensions.' assert input.shape[1] == kernel.shape[1], 'Kernel features and input features must match.' output = _conv_2d_core(input.data, kernel.data) requires_grad = input.requires_grad or kernel.requires_grad dependencies: List[Dependency] = [] if input.requires_grad: def grad_conv2d_input(grad: np.ndarray) -> np.ndarray: grad_padded = np.pad(grad.data, ((0, 0), (0, 0), (kernel.shape[2] - 1, kernel.shape[2] - 1), (kernel.shape[3] - 1, kernel.shape[3] - 1)), 'constant', constant_values=(1)) grad = _conv_2d_core(grad_padded, kernel.data.transpose((1, 0, 2, 3))) return grad dependencies.append(Dependency(activation=input, grad_fn=grad_conv2d_input)) if kernel.requires_grad: def grad_conv2d_kernel(grad: np.ndarray) -> np.ndarray: grad = _conv_2d_core(input.data.transpose((1, 0, 2, 3)), grad.transpose((1, 0, 2, 3))).transpose( (1, 0, 2, 3)) return grad dependencies.append(Dependency(activation=kernel, grad_fn=grad_conv2d_kernel)) output_conv = Tensor(data=output, dependencies=dependencies, requires_grad=requires_grad) if bias is None: return output_conv assert bias.data.ndim == 1, 'Bias tensor must have three dimensions.' bias_batched = np.expand_dims(np.expand_dims(np.expand_dims(bias.data, axis=0), axis=-1), axis=-1) output_bias_add = output_conv.data + bias_batched requires_grad = output_conv.requires_grad or bias.requires_grad dependencies: List[Dependency] = [] if output_conv.requires_grad: def grad_conv_output(grad: np.ndarray) -> np.ndarray: return grad dependencies.append(Dependency(activation=output_conv, grad_fn=grad_conv_output)) if bias.requires_grad: def grad_conv_bias(grad: np.ndarray) -> np.ndarray: return grad dependencies.append(Dependency(activation=output_conv, grad_fn=grad_conv_bias)) return Tensor(data=output_bias_add, dependencies=dependencies, requires_grad=requires_grad) def max_pool_2d(tensor: Tensor, kernel_size: Tuple[int, int]) -> Tensor: assert tensor.data.ndim == 4, 'Input tensor must have four dimensions (batch size, channels, features).' assert kernel_size[0] % 2 == 0 and kernel_size[1] % 2 == 0, 'Kernel size must be odd!' assert tensor.shape[2] % 2 == 0 and tensor.shape[3] % 2 == 0, 'Tensor height and width must be odd!' 
batch_size, channels, height, width = tensor.shape height_factor = height // kernel_size[0] width_factor = width // kernel_size[1] input_reshaped = tensor.data[:, :, :height_factor * kernel_size[0], :width_factor * kernel_size[1]] .reshape(batch_size, channels, height_factor, kernel_size[0], width_factor, kernel_size[1]) output = input_reshaped.max(axis=(3, 5)) indexes = (tensor.data == np.repeat(np.repeat(output, kernel_size[0], axis=2), kernel_size[1], axis=3)).astype(float) requires_grad = tensor.requires_grad if requires_grad: def grad_max_pool_2d(grad: np.ndarray) -> np.ndarray: unpooled_grad = np.repeat(np.repeat(grad.data, kernel_size[0], axis=2), kernel_size[1], axis=3) grad = unpooled_grad * indexes return grad dependencies = [Dependency(activation=tensor, grad_fn=grad_max_pool_2d)] else: dependencies = None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def avg_pool_2d(tensor: Tensor, kernel_size: Tuple[int, int]) -> Tensor: assert tensor.data.ndim == 4, 'Input tensor must have four dimensions (batch size, channels, features).' assert kernel_size[0] % 2 == 0 and kernel_size[1] % 2 == 0, 'Kernel size must be odd!' assert tensor.shape[2] % 2 == 0 and tensor.shape[3] % 2 == 0, 'Tensor height and width must be odd!' batch_size, channels, height, width = tensor.shape height_factor = height // kernel_size[0] width_factor = width // kernel_size[1] input_reshaped = tensor.data[:, :, :height_factor * kernel_size[0], :width_factor * kernel_size[1]] .reshape(batch_size, channels, height_factor, kernel_size[0], width_factor, kernel_size[1]) output = input_reshaped.mean(axis=(3, 5)) requires_grad = tensor.requires_grad if requires_grad: def grad_avg_pool_2d(grad: np.ndarray) -> np.ndarray: unpooled_grad = np.repeat(np.repeat(grad.data, kernel_size[0], axis=2), kernel_size[1], axis=3) grad = (1 / (kernel_size[0] * kernel_size[1])) * unpooled_grad return grad dependencies = [Dependency(activation=tensor, grad_fn=grad_avg_pool_2d)] else: dependencies = None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def batch_norm_1d(tensor: Tensor, gamma: Optional[Tensor] = None, beta: Optional[Tensor] = None, mean: Optional[Tensor] = None, std: Optional[Tensor] = None, eps: float = 1e-05, running_mean: Optional[Tensor] = None, running_std: Optional[Tensor] = None, momentum: float = None) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor]]: if mean is None: mean = tensor.mean() if std is None: std = tensor.std() if running_mean is not None and running_std is not None and momentum is not None: running_mean.data = momentum * running_mean.data + (1. - momentum) * mean.data running_std.data = momentum * running_std.data + (1. 
- momentum) * std.data mean.data = running_mean.data std.data = running_std.data output = (tensor - mean) / (std + eps) if gamma is not None: if output.data.ndim == 2: output = output * gamma.unsqueeze(dim=0) else: output = output * gamma.unsqueeze(dim=0).unsqueeze(dim=-1) if beta is not None: if output.data.ndim == 2: output = output + beta.unsqueeze(dim=0) else: output = output + beta.unsqueeze(dim=0).unsqueeze(dim=-1) return output, running_mean, running_std def batch_norm_2d(tensor: Tensor, gamma: Optional[Tensor] = None, beta: Optional[Tensor] = None, mean: Optional[Tensor] = None, std: Optional[Tensor] = None, eps: float = 1e-05, running_mean: Optional[Tensor] = None, running_std: Optional[Tensor] = None, momentum: float = None) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: if mean is None: mean = tensor.mean() if std is None: std = tensor.std() if running_mean is not None and running_std is not None and momentum is not None: running_mean.data = momentum * running_mean.data + (1. - momentum) * mean.data running_std.data = momentum * running_std.data + (1. - momentum) * std.data mean.data = running_mean.data std.data = running_std.data output = (tensor - mean) / (std + eps) if gamma is not None: output = output * gamma.unsqueeze(dim=0).unsqueeze(dim=-1).unsqueeze(dim=-1) if beta is not None: output = output + beta.unsqueeze(dim=0).unsqueeze(dim=-1).unsqueeze(dim=-1) return output, running_mean, running_std def upsampling_nearest_2d(input: Tensor, scale_factor: int = 2) -> Tensor: assert scale_factor > 0, 'Scale factor must be greater than zero.' output = np.repeat(np.repeat(input.data, scale_factor, axis=-1), scale_factor, axis=-2) requires_grad = input.requires_grad if requires_grad: def grad_upsampling_nearest_2d(grad: np.ndarray) -> np.ndarray: height_factor = grad.shape[2] // scale_factor width_factor = grad.shape[2] // scale_factor grad_reshaped = grad.data[:, :, :height_factor * scale_factor, :width_factor * scale_factor] .reshape(grad.shape[0], grad.shape[1], height_factor, scale_factor, width_factor, scale_factor) grad = grad_reshaped.max(axis=(3, 5)) return grad dependencies = [Dependency(activation=input, grad_fn=grad_upsampling_nearest_2d)] else: dependencies = None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def upsampling_nearest_1d(input: Tensor, scale_factor: int = 2) -> Tensor: assert scale_factor > 0, 'Scale factor must be greater than zero.' 
output = np.repeat(input.data, scale_factor, axis=-1) requires_grad = input.requires_grad if requires_grad: def grad_upsampling_nearest_1d(grad: np.ndarray) -> np.ndarray: features_factor = grad.shape[-1] // scale_factor grad = grad[:, :, :features_factor * scale_factor] .reshape(grad.shape[0], grad.shape[1], features_factor, scale_factor) .max(axis=3) return grad dependencies = [Dependency(activation=input, grad_fn=grad_upsampling_nearest_1d)] else: dependencies = None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def _conv_1d_core(input: np.ndarray, kernel: np.ndarray) -> np.ndarray: sub_shape = (input.shape[2] - kernel.shape[2] + 1,) view_shape = input.shape[:2] + tuple(np.subtract(input.shape[2:], sub_shape) + 1) + sub_shape strides = input.strides[:2] + input.strides[2:] + input.strides[2:] sub_matrices = np.lib.stride_tricks.as_strided(input, view_shape, strides) return np.einsum('oci, bcik->bok', kernel, sub_matrices) def conv_1d(input: Tensor, kernel: Tensor, bias: Optional[Tensor] = None) -> Tensor: assert input.data.ndim == 3, 'Input tensor must have three dimensions.' assert kernel.data.ndim == 3, 'Kernel tensor must have three dimensions.' assert input.shape[1] == kernel.shape[1], 'Kernel features and input features must match.' output = _conv_1d_core(input.data, kernel.data) requires_grad = input.requires_grad or kernel.requires_grad dependencies: List[Dependency] = [] if input.requires_grad: def grad_conv1d_input(grad: np.ndarray) -> np.ndarray: grad = _conv_1d_core(np.pad(grad, ((0, 0), (0, 0), (kernel.shape[2] - 1, kernel.shape[2] - 1)), 'constant', constant_values=(1)), kernel.data.transpose((1, 0, 2))) return grad dependencies.append(Dependency(activation=input, grad_fn=grad_conv1d_input)) if kernel.requires_grad: def grad_conv1d_kernel(grad: np.ndarray) -> np.ndarray: grad = _conv_1d_core(input.data.transpose((1, 0, 2)), grad.transpose((1, 0, 2))).transpose((1, 0, 2)) return grad dependencies.append(Dependency(activation=kernel, grad_fn=grad_conv1d_kernel)) output_conv = Tensor(data=output, dependencies=dependencies, requires_grad=requires_grad) if bias is None: return output_conv assert bias.data.ndim == 1, 'Bias tensor must have three dimensions.' bias_batched = np.expand_dims(np.expand_dims(bias.data, axis=0), axis=-1) output_bias_add = output_conv.data + bias_batched requires_grad = output_conv.requires_grad or bias.requires_grad dependencies: List[Dependency] = [] if output_conv.requires_grad: def grad_conv_output(grad: np.ndarray) -> np.ndarray: return grad dependencies.append(Dependency(activation=output_conv, grad_fn=grad_conv_output)) if bias.requires_grad: def grad_conv_bias(grad: np.ndarray) -> np.ndarray: return grad dependencies.append(Dependency(activation=output_conv, grad_fn=grad_conv_bias)) return Tensor(data=output_bias_add, dependencies=dependencies, requires_grad=requires_grad) def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: assert input.data.ndim in [2, 3], 'Input tensor has the wrong number of dimensions. 
Only two or three dimensions are supported' if input.data.ndim == 2: output = np.expand_dims(input.data, axis=1) else: output = input.data weight_batched = np.expand_dims(weight.data, 0) output = weight_batched @ output.transpose((0, 2, 1)) output = output.transpose((0, 2, 1)) requires_grad = input.requires_grad or weight.requires_grad dependencies: List[Dependency] = [] if input.requires_grad: def grad_linear_input(grad: np.ndarray) -> np.ndarray: return grad @ weight.data dependencies.append(Dependency(activation=input, grad_fn=grad_linear_input)) if weight.requires_grad: def grad_linear_weight(grad: np.ndarray) -> np.ndarray: if input.data.ndim == 3: return (grad.transpose((0, 2, 1)) @ input.data).sum(axis=0) return grad.T @ input.data dependencies.append(Dependency(activation=weight, grad_fn=grad_linear_weight)) if input.data.ndim == 2: output = output[:, 0, :] output_mm = Tensor(data=output, dependencies=dependencies, requires_grad=requires_grad) if bias is None: return output_mm if output_mm.data.ndim == 2: bias_batched = np.expand_dims(bias.data, axis=0) else: bias_batched = np.expand_dims(np.expand_dims(bias.data, axis=0), axis=0) output_bias_add = output_mm.data + bias_batched requires_grad = output_mm.requires_grad or bias.requires_grad dependencies: List[Dependency] = [] if output_mm.requires_grad: def grad_linear_output_mm(grad: np.ndarray) -> np.ndarray: return grad dependencies.append(Dependency(activation=output_mm, grad_fn=grad_linear_output_mm)) if bias.requires_grad: def grad_linear_bias(grad: np.ndarray) -> np.ndarray: return grad dependencies.append(Dependency(activation=output_mm, grad_fn=grad_linear_bias)) return Tensor(data=output_bias_add, dependencies=dependencies, requires_grad=requires_grad) def max_pool_1d(tensor: Tensor, kernel_size: int) -> Tensor: assert tensor.data.ndim == 3, 'Input tensor must have three dimensions (batch size, channels, features).' batch_size, channels, num_features = tensor.shape features_factor = num_features // kernel_size input_reshaped = tensor.data[:, :, :features_factor * kernel_size] .reshape(batch_size, channels, features_factor, kernel_size) output = np.max(input_reshaped, axis=3) indexes = (tensor.data == np.repeat(output, kernel_size, axis=2)).astype(float) requires_grad = tensor.requires_grad if requires_grad: def grad_max_pool_1d(grad: np.ndarray) -> np.ndarray: unpooled_grad = np.repeat(grad.data, kernel_size, axis=2) grad = unpooled_grad * indexes return grad dependencies = [Dependency(activation=tensor, grad_fn=grad_max_pool_1d)] else: dependencies = None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def avg_pool_1d(tensor: Tensor, kernel_size: int) -> Tensor: assert tensor.data.ndim == 3, 'Input tensor must have three dimensions (batch size, channels, features).' 
batch_size, channels, num_features = tensor.shape features_factor = num_features // kernel_size input_reshaped = tensor.data[:, :, :features_factor * kernel_size] .reshape(batch_size, channels, features_factor, kernel_size) output = np.mean(input_reshaped, axis=3, keepdims=False) requires_grad = tensor.requires_grad if requires_grad: def grad_max_pool_1d(grad: np.ndarray) -> np.ndarray: unpooled_grad = np.repeat(grad.data, kernel_size, axis=2) grad = (1 / kernel_size) * unpooled_grad return grad dependencies = [Dependency(activation=tensor, grad_fn=grad_max_pool_1d)] else: dependencies = None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def dropout(tensor: Tensor, p: float = 0.2) -> Tensor: assert 0.0 <= p <= 1.0, 'Parameter p must be in the range of [0, 1].' mask = (np.random.randint(0, 1, size=tensor.shape) > p).astype(float) output = tensor.data * mask requires_grad = tensor.requires_grad dependencies = [Dependency(tensor, lambda grad: grad * mask)] if requires_grad else None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def dropout2d(tensor: Tensor, p: float = 0.2) -> Tensor: assert 0.0 <= p <= 1.0, 'Parameter p must be in the range of [0, 1].' mask = (np.random.randint(0, 2, size=tensor.shape[0]) > p).astype(float).reshape(1, -1, 1, 1) output = tensor.data * mask requires_grad = tensor.requires_grad dependencies = [Dependency(tensor, lambda grad: grad * mask)] if requires_grad else None return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies) def softmax(tensor: Tensor, axis: int = 1) -> Tensor: output_exp = autograd.exp(tensor) output = output_exp / (autograd.sum(output_exp, axis=axis, keepdims=True)) return output
MIT License
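A hedged usage sketch for the cross_entropy_loss function above. The Tensor(data=...) construction and the softmax helper are taken from the record's context; the module alias for autograd/nn/functional.py and the Tensor keyword defaults are assumptions.

import numpy as np
from autograd import Tensor
import autograd.nn.functional as F  # module path assumed from the record's function_path

# A softmax-normalised prediction and a one-hot label of matching shape.
logits = Tensor(data=np.array([[2.0, 0.5, 0.1],
                               [0.2, 1.5, 0.3]]), requires_grad=True)
prediction = F.softmax(logits, axis=1)
label = Tensor(data=np.array([[1.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0]]))

# loss = -(label * log(prediction)), reduced to a scalar with the default 'mean'.
loss = F.cross_entropy_loss(prediction, label, reduction='mean')
print(loss.data)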
abhisharma404/vault
src/lib/utilities/mac_changer/mac_changer.py
MACChanger.startProcess
python
def startProcess(self): self.changeMAC(self.newMAC) checkMAC = self.interfaceMAC() if checkMAC == self.newMAC: colors.success('MAC address succesfully changed to : {}' .format(self.newMAC)) choice = str(input('>> Do you want to restore to default (R/r)? ') .strip()) if choice == 'R' or choice == 'r': self.resetMAC() else: colors.error('Failed to change MAC address, trying again...') self.startProcess()
Change the MAC address of the interface
https://github.com/abhisharma404/vault/blob/0303cf425f028ce38cfaf40640d625861b7c805a/src/lib/utilities/mac_changer/mac_changer.py#L169-L187
import subprocess import re import sys import time import random import colors import os class MACChanger(object): def __init__(self, mac_addr=None, interface=None): self.is_root() if mac_addr is None: self.newMAC = self.generateMAC() elif self.validateMAC(mac_addr): self.newMAC = mac_addr else: colors.error('Please provide a valid MAC address...') sys.exit(1) colors.info('MAC address will be changed to : {}'.format(self.newMAC)) if interface is None: self.interface = self.getInterface() else: self.interface = interface self.origMAC = self.interfaceMAC() colors.info('Original MAC address is : {}'.format(self.origMAC)) if self.interface is None or self.newMAC is None or self.origMAC is None: colors.error('Error! could not change the MAC') sys.exit(1) @staticmethod def validateMAC(mac): if re.match("[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", mac.lower()): return True @staticmethod def is_root(): if os.geteuid() != 0: colors.error('Please run as root') sys.exit(1) else: colors.success('Running as root') @staticmethod def generateMAC(): colors.info('No desired MAC found, generating random MAC...') return "52:54:00:%02x:%02x:%02x" % ( random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), ) @staticmethod def getInterface(): colors.info('Collecting all the interfaces') p = subprocess.Popen(['ifconfig'], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, error = p.communicate() if error: print(error.decode('utf-8')) sys.exit(1) output = output.decode('utf-8') interfaces = re.findall('(.*): ', output) total_index = 0 print('*' * 25) print('Index'.ljust(8, ' '), '|', ' Interface '.ljust(12, ' '), '|') print('*' * 25) for index, interface in enumerate(interfaces): print(index, ' '.ljust(5), ' | ', interface.ljust(11, ' '), '|') total_index = total_index + 1 print('-' * 25) intf = -1 while intf > total_index or intf < 0: intf = int(input('\n>> Enter the index of the interface : ') .strip()) colors.info('Selected interface is : {}'.format(interfaces[intf])) return interfaces[intf] def interfaceMAC(self): result = subprocess.Popen(['ifconfig', self.interface], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, error = result.communicate() if error: print(error.decode('utf-8')) sys.exit(1) output = output.decode('utf-8') mac_addr = re.findall(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", output) return mac_addr[0] def changeMAC(self, mac): colors.info('Changing MAC address...') time.sleep(2) subprocess.call(['ifconfig', self.interface, 'down']) subprocess.call(['ifconfig', self.interface, 'hw', 'ether', mac]) subprocess.call(['ifconfig', self.interface, 'up']) def resetMAC(self): self.changeMAC(self.origMAC) checkMAC = self.interfaceMAC() if checkMAC == self.origMAC: colors.success('MAC address restored to default : {}' .format(self.origMAC)) colors.info('Exiting...') sys.exit(1) else: colors.error('Failed to restore MAC address, trying again...') self.resetMAC()
MIT License
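A hedged sketch of how startProcess above might be invoked, based on the constructor shown in the record's context. The MAC address, interface name, and import path are placeholders, and the script must run as root because is_root() exits otherwise.

from mac_changer import MACChanger  # module path assumed; the file lives at src/lib/utilities/mac_changer/mac_changer.py

# Placeholder MAC address and interface; passing mac_addr=None would generate
# a random address instead.
changer = MACChanger(mac_addr='52:54:00:12:34:56', interface='eth0')

# Changes the MAC via ifconfig, verifies it, and offers to restore the original.
changer.startProcess()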
bbn-q/auspex
src/auspex/instruments/prologix.py
PrologixSocketResource.connect
python
def connect(self, ipaddr=None, gpib=None): if ipaddr is not None: self.ipaddr = ipaddr if gpib is not None: self.gpib = gpib try: self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) self.sock.settimeout(self._timeout) self.sock.connect((self.ipaddr, 1234)) except socket.error as err: logger.error("Cannot open socket to Prologix at {0}: {1}".format(self.ipaddr, err.msg)) raise PrologixError(self.ipaddr) from err self.sock.send(b"++ver\r\n") whoami = self.sock.recv(128).decode() if "Prologix" not in whoami: logger.error("The device at {0} does not appear to be a Prologix; got {1}.".format(self.ipaddr, whoami)) raise PrologixError(whoami) self.sock.send(b"++mode 1\r\n") self.sock.send(b"++auto 1\r\n") self._addr() self.sock.send(b"++clr\r\n") idn = self.query(self.idn_string) if idn == '': logger.error(("Did not receive response to GPIB command {0} " + "from GPIB device {1} on Prologix at {2}.").format(self.idn_string, self.gpib, self.ipaddr)) raise PrologixError(idn) else: logger.debug(("Succesfully connected to device {0} at GPIB port {1} on" + " Prologix controller at {2}.").format(idn, self.gpib, self.ipaddr))
Connect to a GPIB device through a Prologix GPIB-ETHERNET controller box. Args: ipaddr: The IP address of the Prologix GPIB-ETHERNET controller. gpib: The GPIB address of the instrument to be controlled. Returns: None.
https://github.com/bbn-q/auspex/blob/e9763e1907546ad49210415a6b8c2f6d9999f31a/src/auspex/instruments/prologix.py#L58-L98
__all__ = ['PrologixSocketResource'] import os import numpy as np import socket import functools from auspex.log import logger from pyvisa.util import _converters, from_ascii_block, to_ascii_block, to_ieee_block, from_binary_block class PrologixError(Exception): class PrologixSocketResource(object): def __init__(self, ipaddr=None, gpib=None): super(PrologixSocketResource, self).__init__() if ipaddr is not None: self.ipaddr = ipaddr if gpib is not None: self.gpib = gpib self.sock = None self._timeout = 5 self.read_termination = "\r\n" self.write_termination = "\r\n" self.idn_string = "*IDN?" self.bufsize = 4096 @property def timeout(self): return self._timeout @timeout.setter def timeout(self, value): self._timeout = timeout if self.sock is not None: self.sock.settimeout(self._timeout)
Apache License 2.0
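A hedged usage sketch for the connect method above; the IP address and GPIB address are placeholders for a real Prologix GPIB-ETHERNET controller on the network.

from auspex.instruments.prologix import PrologixSocketResource

# Placeholder addresses: 192.0.2.10 is a documentation-only IP; GPIB address 12 is arbitrary.
res = PrologixSocketResource(ipaddr='192.0.2.10', gpib=12)

# Opens a TCP socket to port 1234, verifies the controller answers ++ver with
# "Prologix", configures controller mode, and checks the instrument answers *IDN?.
res.connect()

# Subsequent traffic goes over the same socket, e.g. via the query helper that
# connect() itself uses for the identification string.
print(res.query('*IDN?'))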
pytorch/fairseq
examples/speech_synthesis/utils.py
gross_pitch_error
python
def gross_pitch_error(true_t, true_f, est_t, est_f): correct_frames = _true_voiced_frames(true_t, true_f, est_t, est_f) gross_pitch_error_frames = _gross_pitch_error_frames( true_t, true_f, est_t, est_f ) return np.sum(gross_pitch_error_frames) / np.sum(correct_frames)
The relative frequency in percent of pitch estimates that are outside a threshold around the true pitch. Only frames that are considered pitched by both the ground truth and the estimator (if applicable) are considered.
https://github.com/pytorch/fairseq/blob/fcca32258c8e8bcc9f9890bf4714fa2f96b6b3e1/examples/speech_synthesis/utils.py#L55-L66
import numpy as np import torch from scipy.interpolate import interp1d import torchaudio from fairseq.tasks.text_to_speech import ( batch_compute_distortion, compute_rms_dist ) def batch_mel_spectral_distortion( y1, y2, sr, normalize_type="path", mel_fn=None ): if mel_fn is None or mel_fn.sample_rate != sr: mel_fn = torchaudio.transforms.MelSpectrogram( sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr), hop_length=int(0.0125 * sr), f_min=20, n_mels=80, window_fn=torch.hann_window ).to(y1[0].device) offset = 1e-6 return batch_compute_distortion( y1, y2, sr, lambda y: torch.log(mel_fn(y) + offset).transpose(-1, -2), compute_rms_dist, normalize_type ) def _same_t_in_true_and_est(func): def new_func(true_t, true_f, est_t, est_f): assert type(true_t) is np.ndarray assert type(true_f) is np.ndarray assert type(est_t) is np.ndarray assert type(est_f) is np.ndarray interpolated_f = interp1d( est_t, est_f, bounds_error=False, kind='nearest', fill_value=0 )(true_t) return func(true_t, true_f, true_t, interpolated_f) return new_func @_same_t_in_true_and_est
MIT License
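A hedged sketch of calling gross_pitch_error above with synthetic pitch tracks; the decorator shown in the context interpolates the estimated track onto the reference time axis, so the two tracks may use different time grids. The import path mirrors the record's function_path.

import numpy as np
from examples.speech_synthesis.utils import gross_pitch_error  # import path assumed from the record

# Reference pitch track; 0 Hz is assumed to mark unvoiced frames.
true_t = np.arange(0.0, 1.0, 0.01)
true_f = np.where(true_t < 0.5, 120.0, 0.0)

# Estimated track on a different time grid; the decorator shown in the context
# interpolates it onto true_t before the error is computed.
est_t = np.arange(0.0, 1.0, 0.012)
est_f = np.where(est_t < 0.5, 150.0, 0.0)

# Fraction of jointly voiced frames whose estimate falls outside the pitch threshold.
print(gross_pitch_error(true_t, true_f, est_t, est_f))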
cc1-cloud/cc1
src/cm/views/user/system_image.py
get_by_id
python
def get_by_id(caller_id, system_image_id, groups): return SystemImage.get(caller_id, system_image_id, groups).dict
@cmview_user @param_post{groups,list(int)} list of Groups ids, required for @val{group} access @param_post{system_image_id,int} id of the requested Image @response{dict} SystemImage.dict property of the requested SystemImage
https://github.com/cc1-cloud/cc1/blob/8113673fa13b6fe195cea99dedab9616aeca3ae8/src/cm/views/user/system_image.py#L121-L129
import os import urllib from cm.models.iso_image import IsoImage from cm.models.storage_image import StorageImage from cm.models.system_image import SystemImage from cm.models.system_image_group import SystemImageGroup from cm.models.user import User from cm.utils import log from cm.utils.decorators import user_log from cm.utils.exception import CMException from cm.utils.threads.image import DownloadImage from common.hardware import disk_filesystems, disk_controllers, video_devices, network_devices from common.states import image_access, image_states, image_types import subprocess @user_log(log=True) def download(caller_id, description, name, path, disk_controller, network_device, platform, video_device): user = User.get(caller_id) if not any([path.startswith('http://'), path.startswith('https://'), path.startswith('ftp://')]): path = 'http://' + path.strip() try: connection = urllib.urlopen(path) size = int(connection.info()["Content-Length"]) except IOError: log.exception(caller_id, 'Cannot find image') raise CMException('image_not_found') except KeyError: log.exception(caller_id, 'Cannot calculate size') raise CMException('image_calculate_size') user = User.get(caller_id) user.check_storage(size / (1024 * 1024)) image = SystemImage.create(name=name, description=description, user=user, platform=platform, disk_controller=disk_controller, network_device=network_device, video_device=video_device) try: image.save() except Exception, e: log.error(caller_id, "Unable to save image to DB: %s" % str(e)) raise CMException('image_create') DownloadImage(image, path, size).start() @user_log(log=True) def get_list(caller_id, access, group_id=None): images = SystemImage.objects.exclude(state=image_states['locked']).filter(access=access) if access == image_access['private']: images = images.filter(user__id__exact=caller_id) if access == image_access['group']: images = images.filter(systemimagegroup__group_id__in=group_id) return [img.dict for img in images] @user_log(log=True)
Apache License 2.0
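A hedged call sketch for get_by_id above; all identifiers are placeholders, and a configured CM/Django environment is required because the function goes through the SystemImage model and the @user_log decorator, so this is illustrative only.

from cm.views.user.system_image import get_by_id

# Hypothetical ids: the caller, the requested image, and the groups the caller may access.
image_dict = get_by_id(caller_id=1, system_image_id=42, groups=[3, 7])

# The result is SystemImage.dict, a plain dictionary describing the requested image.
print(image_dict)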
mscroggs/symfem
symfem/functionals.py
InnerProductIntegralMoment.dot
python
def dot(self, function): tdim = len(self.inner_with_left) return vdot(self.inner_with_left, tuple(vdot(function[tdim * i: tdim * (i + 1)], self.inner_with_right) for i in range(0, tdim))) * self.f * self.reference.jacobian()
Take the inner product of a function with the moment direction.
https://github.com/mscroggs/symfem/blob/a08155837e49abe9123d2d8edf60fd36f7f1b8ee/symfem/functionals.py#L506-L511
import sympy import numpy from .symbolic import subs, x, t, PiecewiseFunction, sym_sum, to_sympy, to_float from .vectors import vdot from .calculus import derivative, jacobian_component, grad, diff, div from . import mappings class BaseFunctional: def __init__(self, entity=(None, None), mapping="identity"): self.entity = entity self.mapping = mapping def eval(self, fun, symbolic=True): raise NotImplementedError def dof_point(self): return tuple(None for i in range(self.reference.gdim)) def dof_direction(self): return None def entity_dim(self): return self.entity[0] def perform_mapping(self, fs, map, inverse_map, tdim): return [getattr(mappings, self.mapping)(f, map, inverse_map, tdim) for f in fs] get_points_and_weights = None name = None class PointEvaluation(BaseFunctional): def __init__(self, point, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.point = point def eval(self, function, symbolic=True): value = subs(function, x, self.point) if symbolic: return value else: return to_float(value) def dof_point(self): return self.point def get_points_and_weights(self, max_order=None): return numpy.array([self.point]), numpy.array([1]) name = "Point evaluation" class WeightedPointEvaluation(BaseFunctional): def __init__(self, point, weight, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.point = point self.weight = weight def eval(self, function, symbolic=True): value = subs(function, x, self.point) * self.weight if symbolic: return value else: return to_float(value) def dof_point(self): return self.point def get_points_and_weights(self, max_order=None): return numpy.array([self.point]), numpy.array([self.weight]) name = "Weighted point evaluation" class DerivativePointEvaluation(BaseFunctional): def __init__(self, point, derivative, entity=(None, None), mapping=None): super().__init__(entity, mapping) self.point = point self.derivative = derivative def eval(self, function, symbolic=True): for i, j in zip(x, self.derivative): for k in range(j): function = diff(function, i) value = subs(function, x, self.point) if symbolic: return value else: return to_float(value) def dof_point(self): return self.point def perform_mapping(self, fs, map, inverse_map, tdim): if self.mapping is not None: return super().perform_mapping(fs, map, inverse_map, tdim) out = [] J = sympy.Matrix([[diff(map[i], x[j]) for j in range(tdim)] for i in range(tdim)]) for dofs in zip(*[fs[i::tdim] for i in range(tdim)]): for i in range(tdim): out.append(sym_sum(a * b for a, b in zip(dofs, J.row(i)))) return [subs(b, x, inverse_map) for b in out] name = "Point derivative evaluation" class PointDirectionalDerivativeEvaluation(BaseFunctional): def __init__(self, point, direction, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.point = point self.dir = direction def eval(self, function, symbolic=True): if isinstance(function, PiecewiseFunction): function = function.get_piece(self.point) value = subs(derivative(function, self.dir), x, self.point) if symbolic: return value else: return to_float(value) def dof_point(self): return self.point def dof_direction(self): return self.dir name = "Point evaluation of directional derivative" class PointNormalDerivativeEvaluation(PointDirectionalDerivativeEvaluation): def __init__(self, point, edge, entity=(None, None), mapping="identity"): super().__init__(point, edge.normal(), entity=entity, mapping=mapping) self.reference = edge name = "Point evaluation of normal derivative" class 
PointComponentSecondDerivativeEvaluation(BaseFunctional): def __init__(self, point, component, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.point = point self.component = component def eval(self, function, symbolic=True): value = subs(jacobian_component(function, self.component), x, self.point) if symbolic: return value else: return to_float(value) def dof_point(self): return self.point name = "Point evaluation of Jacobian component" class PointInnerProduct(BaseFunctional): def __init__(self, point, lvec, rvec, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.point = point self.lvec = lvec self.rvec = rvec def eval(self, function, symbolic=True): v = subs(function, x, self.point) tdim = len(self.lvec) assert len(function) == tdim ** 2 value = vdot(self.lvec, tuple(vdot(v[tdim * i: tdim * (i + 1)], self.rvec) for i in range(0, tdim))) if symbolic: return value else: return to_float(value) def dof_point(self): return self.point def dof_direction(self): if self.rvec != self.lvec: return None return self.lvec name = "Point inner product" class DotPointEvaluation(BaseFunctional): def __init__(self, point, vector, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.point = point self.vector = vector def eval(self, function, symbolic=True): value = vdot(subs(function, x, self.point), subs(self.vector, x, self.point)) if symbolic: return value else: return to_float(value) def dof_point(self): return self.point def dof_direction(self): return self.vector name = "Dot point evaluation" class IntegralAgainst(BaseFunctional): def __init__(self, reference, f, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.reference = reference self.f = subs(f, x, t) if isinstance(self.f, tuple): if len(self.f) == self.reference.tdim: self.f = tuple( sum(self.reference.axes[j][i] * c / to_sympy(self.reference.jacobian()) for j, c in enumerate(self.f)) for i, o in enumerate(self.reference.origin) ) else: assert len(self.f) == self.reference.tdim ** 2 assert self.reference.vertices == self.reference.reference_vertices def dof_point(self): return tuple(sympy.Rational(sum(i), len(i)) for i in zip(*self.reference.vertices)) def eval(self, function, symbolic=True): point = [i for i in self.reference.origin] for i, a in enumerate(zip(*self.reference.axes)): for j, k in zip(a, t): point[i] += j * k integrand = self.dot(subs(function, x, point)) value = self.reference.integral(integrand) if symbolic: return value else: return to_float(value) def dot(self, function): return vdot(function, self.f) name = "Integral against" class IntegralOfDirectionalMultiderivative(BaseFunctional): def __init__(self, reference, directions, orders, scale=1, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.reference = reference self.directions = directions self.orders = orders self.scale = scale def dof_point(self): return tuple(sympy.Rational(sum(i), len(i)) for i in zip(*self.reference.vertices)) def eval(self, function, symbolic=True): for dir, o in zip(self.directions, self.orders): for i in range(o): function = sum(d * diff(function, x[j]) for j, d in enumerate(dir)) point = [i for i in self.reference.origin] for i, a in enumerate(zip(*self.reference.axes)): for j, k in zip(a, t): point[i] += j * k integrand = self.scale * subs(function, x, point) value = self.reference.integral(integrand) if symbolic: return value else: return to_float(value) def perform_mapping(self, fs, map, 
inverse_map, tdim): if sum(self.orders) > 0: raise NotImplementedError("Mapping high order derivatives not implemented") return super().perform_mapping(fs, map, inverse_map, tdim) name = "Integral of a directional derivative" class IntegralMoment(BaseFunctional): def __init__(self, reference, f, dof, entity=(None, None), mapping="identity"): super().__init__(entity, mapping) self.reference = reference self.dof = dof self.f = subs(f, x, t) if isinstance(self.f, tuple): if len(self.f) == self.reference.tdim: self.f = tuple( sum(self.reference.axes[j][i] * c / to_sympy(self.reference.jacobian()) for j, c in enumerate(self.f)) for i, o in enumerate(self.reference.origin) ) else: assert len(self.f) == self.reference.tdim ** 2 assert self.reference.vertices == self.reference.reference_vertices def eval(self, function, symbolic=True): point = [i for i in self.reference.origin] for i, a in enumerate(zip(*self.reference.axes)): for j, k in zip(a, t): point[i] += j * k integrand = self.dot(subs(function, x, point)) if isinstance(integrand, PiecewiseFunction): integrand = integrand.get_piece(self.reference.midpoint()) value = self.reference.integral(to_sympy(integrand)) if symbolic: return value else: return to_float(value) def dot(self, function): return vdot(function, self.f) def dof_point(self): p = self.dof.dof_point() return tuple( o + sum(self.reference.axes[j][i] * c for j, c in enumerate(p)) for i, o in enumerate(self.reference.origin) ) def dof_direction(self): p = self.dof.dof_direction() if p is None: return None return tuple( sum(self.reference.axes[j][i] * c for j, c in enumerate(p)) for i in range(self.reference.gdim) ) name = "Integral moment" class VecIntegralMoment(IntegralMoment): def __init__(self, reference, f, dot_with, dof, entity=(None, None), mapping="identity"): super().__init__(reference, f, dof, entity=entity, mapping=mapping) self.dot_with = dot_with def dot(self, function): return vdot(function, self.dot_with) * self.f def dof_direction(self): return self.dot_with name = "Vector integral moment" class DerivativeIntegralMoment(IntegralMoment): def __init__(self, reference, f, dot_with, dof, entity=(None, None), mapping="identity"): super().__init__(reference, f, dof, entity=entity, mapping=mapping) self.dot_with = dot_with def dot(self, function): return vdot(function, self.dot_with) * self.f def dof_direction(self): return self.dot_with def eval(self, function, symbolic=True): point = [i for i in self.reference.origin] for i, a in enumerate(zip(*self.reference.axes)): for j, k in zip(a, t): point[i] += j * k integrand = self.dot(subs(grad(function, self.reference.gdim), x, point)) value = self.reference.integral(integrand) if symbolic: return value else: return to_float(value) name = "Derivative integral moment" class DivergenceIntegralMoment(IntegralMoment): def __init__(self, reference, f, dof, entity=(None, None), mapping="identity"): super().__init__(reference, f, dof, entity=entity, mapping=mapping) def eval(self, function, symbolic=True): point = [i for i in self.reference.origin] for i, a in enumerate(zip(*self.reference.axes)): for j, k in zip(a, t): point[i] += j * k integrand = self.dot(subs(div(function), x, point)) value = self.reference.integral(integrand) if symbolic: return value else: return to_float(value) name = "Integral moment of divergence" class TangentIntegralMoment(VecIntegralMoment): def __init__(self, reference, f, dof, entity=(None, None), mapping="covariant"): super().__init__(reference, f, reference.tangent(), dof, entity=entity, 
mapping=mapping) name = "Tangential integral moment" class NormalIntegralMoment(VecIntegralMoment): def __init__(self, reference, f, dof, entity=(None, None), mapping="contravariant"): super().__init__(reference, f, reference.normal(), dof, entity=entity, mapping=mapping) name = "Normal integral moment" class NormalDerivativeIntegralMoment(DerivativeIntegralMoment): def __init__(self, reference, f, dof, entity=(None, None), mapping="identity"): super().__init__(reference, f, reference.normal(), dof, entity=entity, mapping=mapping) name = "Normal derivative integral moment" class InnerProductIntegralMoment(IntegralMoment): def __init__(self, reference, f, inner_with_left, inner_with_right, dof, entity=(None, None), mapping="identity"): super().__init__(reference, f, dof, entity=entity, mapping=mapping) self.inner_with_left = inner_with_left self.inner_with_right = inner_with_right
MIT License
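The dot method above contracts a flattened tdim x tdim matrix-valued function between the two moment directions and then scales by the moment function f and the reference Jacobian. A small standalone sketch of the same contraction with plain Python tuples (symfem's symbolic types are left out):

# Standalone illustration of the contraction performed by dot(): the flattened
# function is read as a row-major tdim x tdim matrix M and the result is l . (M . r).
def vdot(a, b):
    return sum(x * y for x, y in zip(a, b))

left = (1, 0)       # inner_with_left
right = (0, 1)      # inner_with_right
function = (1, 2,   # M = [[1, 2],
            3, 4)   #      [3, 4]]

tdim = len(left)
rows_dot_right = tuple(vdot(function[tdim * i: tdim * (i + 1)], right) for i in range(tdim))
print(vdot(left, rows_dot_right))   # 2, i.e. M[0][1]; dot() also multiplies by f and the Jacobian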
openforcefield/openff-interchange
openff/interchange/components/interchange.py
Interchange.remove_handler
python
def remove_handler(self, handler_name: str): self._inner_data.handlers.pop(handler_name)
Remove a PotentialHandler from this Interchange object.
https://github.com/openforcefield/openff-interchange/blob/a080e348b62c36c3c6a6b04e8afde64556f3186e/openff/interchange/components/interchange.py#L92-L94
import warnings from copy import deepcopy from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union import mdtraj as md import numpy as np from openff.toolkit.topology.topology import Topology from openff.toolkit.typing.engines.smirnoff import ForceField from openff.utilities.utilities import has_package, requires_package from pydantic import Field, validator from openff.interchange.components.mdtraj import _OFFBioTop from openff.interchange.components.potentials import PotentialHandler from openff.interchange.components.smirnoff import ( SMIRNOFF_POTENTIAL_HANDLERS, SMIRNOFFBondHandler, SMIRNOFFConstraintHandler, ) from openff.interchange.exceptions import ( InternalInconsistencyError, InvalidBoxError, InvalidTopologyError, MissingParameterHandlerError, MissingPositionsError, SMIRNOFFHandlersNotImplementedError, UnsupportedExportError, ) from openff.interchange.models import DefaultModel from openff.interchange.types import ArrayQuantity if TYPE_CHECKING: if has_package("foyer"): from foyer.forcefield import Forcefield as FoyerForcefield _SUPPORTED_SMIRNOFF_HANDLERS = { "Constraints", "Bonds", "Angles", "ProperTorsions", "ImproperTorsions", "vdW", "Electrostatics", "LibraryCharges", "ChargeIncrementModel", "VirtualSites", } class Interchange(DefaultModel): class InnerSystem(DefaultModel): handlers: Dict[str, PotentialHandler] = dict() topology: Optional[_OFFBioTop] = Field(None) box: ArrayQuantity["nanometer"] = Field(None) positions: ArrayQuantity["nanometer"] = Field(None) @validator("box") def validate_box(cls, val): if val is None: return val if val.shape == (3, 3): return val elif val.shape == (3,): val = val * np.eye(3) return val else: raise InvalidBoxError def __init__(self): self._inner_data = self.InnerSystem() @property def handlers(self): return self._inner_data.handlers def add_handler(self, handler_name: str, handler): self._inner_data.handlers.update({handler_name: handler})
MIT License
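A hedged sketch of remove_handler above, paired with the add_handler method shown in the same context. Whether a bare Interchange() constructs cleanly depends on the DefaultModel base class not shown in the record, and the placeholder object stands in for a real SMIRNOFF PotentialHandler, so treat this purely as an illustration of the dict-backed handler registry.

from openff.interchange.components.interchange import Interchange

# Assumes the bare constructor shown in the record works as written.
interchange = Interchange()

# add_handler simply stores the object under a name; a placeholder stands in
# for a real PotentialHandler here, for illustration only.
placeholder_handler = object()
interchange.add_handler("Bonds", placeholder_handler)
print("Bonds" in interchange.handlers)   # True

# remove_handler pops the named entry from the handlers dict; a missing name
# would raise KeyError because pop() is called without a default.
interchange.remove_handler("Bonds")
print("Bonds" in interchange.handlers)   # False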
weblyzard/weblyzard_api
src/python/weblyzard_api/client/domain_specificity.py
DomainSpecificity.parse_documents
python
def parse_documents(self, matview_name, documents, is_case_sensitive=False, batch_size=None): found_tags = {} for document_batch in self.get_document_batch(documents=documents, batch_size=batch_size): result = self.request('parse_documents/%s/%s' % (matview_name, is_case_sensitive), document_batch) if result: found_tags.update(result[matview_name]) return found_tags
:param matview_name: a comma-separated list of matview_names to check for domain specificity. :param documents: a list of dictionaries, each containing a document :param is_case_sensitive: case sensitive or not :returns: dict (profilename: (content_id, dom_spec))
https://github.com/weblyzard/weblyzard_api/blob/9dfc8d617e1fb0f78548a40162b0d3c2cff6d12b/src/python/weblyzard_api/client/domain_specificity.py#L64-L82
from __future__ import unicode_literals from eWRT.ws.rest import MultiRESTClient from weblyzard_api.client import ( WEBLYZARD_API_URL, WEBLYZARD_API_USER, WEBLYZARD_API_PASS) class DomainSpecificity(MultiRESTClient): URL_PATH = 'rest/domain_specificity' def __init__(self, url=WEBLYZARD_API_URL, usr=WEBLYZARD_API_USER, pwd=WEBLYZARD_API_PASS, default_timeout=None): MultiRESTClient.__init__(self, service_urls=url, user=usr, password=pwd, default_timeout=default_timeout) def add_profile(self, profile_name, profile_mapping): return self.request('add_or_refresh_profile/%s' % profile_name, profile_mapping, execute_all_services=True) def get_domain_specificity(self, profile_name, documents, is_case_sensitive=True): return self.request('parse_documents/%s/%s' % (profile_name, is_case_sensitive), documents)
Apache License 2.0
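A hedged call sketch for parse_documents above; the service URL, credentials, matview name, and document dictionaries are all placeholders, and the exact document schema expected by the web service is not shown in the record.

from weblyzard_api.client.domain_specificity import DomainSpecificity

# Placeholder service URL and credentials; real values normally come from the
# WEBLYZARD_API_* settings imported in the module.
client = DomainSpecificity(url='https://example.org/ds', usr='user', pwd='secret')

# Hypothetical documents to match against the named profile (matview).
documents = [
    {'content_id': 1, 'title': 'Glacier retreat', 'content': 'Alpine glaciers are shrinking.'},
    {'content_id': 2, 'title': 'Stock markets', 'content': 'Shares rallied on Monday.'},
]

# Sends the documents in batches and merges the per-batch results into one dict.
scores = client.parse_documents('climate_profile', documents,
                                is_case_sensitive=False, batch_size=100)
print(scores)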
nteract/scrapbook
scrapbook/models.py
Notebook.filename
python
def filename(self): return os.path.basename(self.path)
str: filename found at the specified path
https://github.com/nteract/scrapbook/blob/3c74e63f7df99cca3148182454797792aede4b9b/scrapbook/models.py#L93-L95
from __future__ import unicode_literals import os import copy import nbformat import collections import pandas as pd from six import string_types from collections import OrderedDict from papermill.iorw import papermill_io from .scraps import Scrap, Scraps, payload_to_scrap, scrap_to_payload from .schemas import GLUE_PAYLOAD_PREFIX, RECORD_PAYLOAD_PREFIX from .encoders import registry as encoder_registry from .exceptions import ScrapbookException from .utils import kernel_required, deprecated try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse def merge_dicts(dicts): iterdicts = iter(dicts) outcome = next(iterdicts).copy() for d in iterdicts: outcome.update(d) return outcome class Notebook(object): def __init__(self, node_or_path): if isinstance(node_or_path, string_types): path = urlparse(node_or_path).path if not os.path.splitext(path)[-1].endswith('ipynb'): raise Warning( "Requires an '.ipynb' file extension. Provided path: '{}'".format(node_or_path) ) self.path = node_or_path self.node = nbformat.reads(papermill_io.read(node_or_path), as_version=4) else: self.path = "" self.node = node_or_path self._scraps = None self._outputs = None def copy(self): cp = Notebook(self.node.copy()) cp.path = self.path return cp @property def metadata(self): return self.node.metadata @property def nbformat_minor(self): return self.node.nbformat_minor @property def nbformat(self): return self.node.nbformat @property def cells(self): return self.node.cells @property
BSD 3-Clause New or Revised License
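A short hedged sketch for the filename accessor above (exposed as a property on the class, judging by the "str:" docstring style); the notebook path is a placeholder and must point to an existing file because the constructor reads it.

from scrapbook.models import Notebook

# Hypothetical path; the constructor reads the file via papermill's I/O layer,
# so it must point to a real .ipynb file.
nb = Notebook('results/analysis_2021.ipynb')

print(nb.path)      # 'results/analysis_2021.ipynb'
print(nb.filename)  # 'analysis_2021.ipynb', i.e. os.path.basename(nb.path)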
elastic/eland
eland/field_mappings.py
FieldMappings.field_name_pd_dtype
python
def field_name_pd_dtype(self, es_field_name: str) -> str: if es_field_name not in self._mappings_capabilities.es_field_name: raise KeyError(f"es_field_name {es_field_name} does not exist") pd_dtype = self._mappings_capabilities.loc[ self._mappings_capabilities.es_field_name == es_field_name ].pd_dtype.squeeze() return pd_dtype
Parameters ---------- es_field_name: str Returns ------- pd_dtype: str The pandas data type we map to Raises ------ KeyError If es_field_name does not exist in mapping
https://github.com/elastic/eland/blob/704c8982bcd5f89787c47c267b3d1572bb1cecdb/eland/field_mappings.py#L653-L675
import warnings from typing import ( TYPE_CHECKING, Any, Dict, List, Mapping, NamedTuple, Optional, Set, TextIO, Tuple, Union, ) import numpy as np import pandas as pd from pandas.core.dtypes.common import ( is_bool_dtype, is_datetime_or_timedelta_dtype, is_float_dtype, is_integer_dtype, is_string_dtype, ) from pandas.core.dtypes.inference import is_list_like if TYPE_CHECKING: from elasticsearch import Elasticsearch from numpy.typing import DTypeLike ES_FLOAT_TYPES: Set[str] = {"double", "float", "half_float", "scaled_float"} ES_INTEGER_TYPES: Set[str] = {"long", "integer", "short", "byte"} ES_COMPATIBLE_TYPES: Dict[str, Set[str]] = { "double": ES_FLOAT_TYPES, "scaled_float": ES_FLOAT_TYPES, "float": ES_FLOAT_TYPES, "half_float": ES_FLOAT_TYPES, "long": ES_INTEGER_TYPES, "integer": ES_INTEGER_TYPES, "short": ES_INTEGER_TYPES, "byte": ES_INTEGER_TYPES, "date": {"date_nanos"}, "date_nanos": {"date"}, "keyword": {"text"}, } class Field(NamedTuple): column: str es_field_name: str is_source: bool es_dtype: str es_date_format: Optional[str] pd_dtype: type is_searchable: bool is_aggregatable: bool is_scripted: bool aggregatable_es_field_name: str @property def is_numeric(self) -> bool: return is_integer_dtype(self.pd_dtype) or is_float_dtype(self.pd_dtype) @property def is_timestamp(self) -> bool: return is_datetime_or_timedelta_dtype(self.pd_dtype) @property def is_bool(self) -> bool: return is_bool_dtype(self.pd_dtype) @property def np_dtype(self): return np.dtype(self.pd_dtype) def is_es_agg_compatible(self, es_agg) -> bool: if isinstance(es_agg, tuple): if es_agg[0] == "extended_stats": es_agg = es_agg[1] elif es_agg[0] == "percentiles": es_agg = "percentiles" if es_agg == "median_absolute_deviation" and self.is_bool: return False if ( es_agg in {"cardinality", "value_count", "mode"} or self.is_numeric or self.is_bool ): return True if es_agg in {"min", "max", "avg", "percentiles"} and self.is_timestamp: return True return False @property def nan_value(self) -> Any: if self.is_timestamp: return pd.NaT return np.float64(np.NaN) class FieldMappings: ES_DTYPE_TO_PD_DTYPE: Dict[str, str] = { "text": "object", "keyword": "object", "long": "int64", "integer": "int64", "short": "int64", "byte": "int64", "binary": "int64", "double": "float64", "float": "float64", "half_float": "float64", "scaled_float": "float64", "date": "datetime64[ns]", "date_nanos": "datetime64[ns]", "boolean": "bool", } column_labels: List[str] = [ "es_field_name", "is_source", "es_dtype", "es_date_format", "pd_dtype", "is_searchable", "is_aggregatable", "is_scripted", "aggregatable_es_field_name", ] def __init__( self, client: "Elasticsearch", index_pattern: str, display_names: Optional[List[str]] = None, ): if (client is None) or (index_pattern is None): raise ValueError( f"Can not initialise mapping without client " f"or index_pattern {client} {index_pattern}", ) get_mapping = client.indices.get_mapping(index=index_pattern) if not get_mapping: raise ValueError( f"Can not get mapping for {index_pattern} " f"check indexes exist and client has permission to get mapping." 
) all_fields = FieldMappings._extract_fields_from_mapping(get_mapping) all_fields_caps = client.field_caps(index=index_pattern, fields="*") source_fields = FieldMappings._extract_fields_from_mapping( get_mapping, source_only=True ) self._mappings_capabilities = FieldMappings._create_capability_matrix( all_fields, source_fields, all_fields_caps ) if display_names is not None: self.display_names = display_names @staticmethod def _extract_fields_from_mapping( mappings: Dict[str, Any], source_only: bool = False ) -> Dict[str, str]: fields = {} def flatten(x, name=""): if isinstance(x, dict): for a in x: if a == "type" and isinstance( x[a], str ): field_name = name[:-1] field_type = x[a] date_format = None if field_type == "date" and "format" in x: date_format = x["format"] if field_name in fields and fields[field_name] != ( field_type, date_format, ): warnings.warn( f"Field {field_name} has conflicting types " f"{fields[field_name]} != {field_type}", UserWarning, ) else: fields[field_name] = (field_type, date_format) elif a == "properties" or (not source_only and a == "fields"): flatten(x[a], name) elif not ( source_only and a == "fields" ): flatten(x[a], name + a + ".") for index in mappings: if "properties" in mappings[index]["mappings"]: properties = mappings[index]["mappings"]["properties"] else: es_types = list(mappings[index]["mappings"].keys()) if len(es_types) != 1: raise NotImplementedError( f"eland only supports 0 or 1 Elasticsearch types. es_types={es_types}" ) properties = mappings[index]["mappings"][es_types[0]]["properties"] flatten(properties) return fields @staticmethod def _create_capability_matrix(all_fields, source_fields, all_fields_caps): all_fields_caps_fields = all_fields_caps["fields"] capability_matrix = {} for field, field_caps in all_fields_caps_fields.items(): if field in all_fields: for kk, vv in field_caps.items(): _source = field in source_fields es_field_name = field es_dtype = vv["type"] es_date_format = all_fields[field][1] pd_dtype = FieldMappings._es_dtype_to_pd_dtype(vv["type"]) is_searchable = vv["searchable"] is_aggregatable = vv["aggregatable"] scripted = False aggregatable_es_field_name = None caps = [ es_field_name, _source, es_dtype, es_date_format, pd_dtype, is_searchable, is_aggregatable, scripted, aggregatable_es_field_name, ] capability_matrix[field] = caps if "non_aggregatable_indices" in vv: warnings.warn( f"Field {field} has conflicting aggregatable fields across indexes " f"{str(vv['non_aggregatable_indices'])}", UserWarning, ) if "non_searchable_indices" in vv: warnings.warn( f"Field {field} has conflicting searchable fields across indexes " f"{str(vv['non_searchable_indices'])}", UserWarning, ) capability_matrix_df = pd.DataFrame.from_dict( capability_matrix, orient="index", columns=FieldMappings.column_labels ) def find_aggregatable(row, df): row_as_dict = row.to_dict() if not row_as_dict["is_aggregatable"]: es_field_name_keyword = row.es_field_name + ".keyword" try: series = df.loc[df.es_field_name == es_field_name_keyword] if not series.empty and series.is_aggregatable.squeeze(): row_as_dict[ "aggregatable_es_field_name" ] = es_field_name_keyword else: row_as_dict["aggregatable_es_field_name"] = None except KeyError: row_as_dict["aggregatable_es_field_name"] = None else: row_as_dict["aggregatable_es_field_name"] = row_as_dict["es_field_name"] return pd.Series(data=row_as_dict) capability_matrix_df = capability_matrix_df.apply( find_aggregatable, args=(capability_matrix_df,), axis="columns" ) return 
capability_matrix_df[capability_matrix_df.is_source].sort_index() @classmethod def _es_dtype_to_pd_dtype(cls, es_dtype): return cls.ES_DTYPE_TO_PD_DTYPE.get(es_dtype, "object") @staticmethod def _pd_dtype_to_es_dtype(pd_dtype) -> Optional[str]: es_dtype: Optional[str] = None if is_float_dtype(pd_dtype): es_dtype = "double" elif is_integer_dtype(pd_dtype): es_dtype = "long" elif is_bool_dtype(pd_dtype): es_dtype = "boolean" elif is_string_dtype(pd_dtype): es_dtype = "keyword" elif is_datetime_or_timedelta_dtype(pd_dtype): es_dtype = "date" else: warnings.warn( f"No mapping for pd_dtype: [{pd_dtype}], using default mapping" ) return es_dtype @staticmethod def _generate_es_mappings( dataframe: "pd.DataFrame", es_type_overrides: Optional[Mapping[str, str]] = None ) -> Dict[str, Dict[str, Dict[str, Any]]]: es_dtype: Union[str, Dict[str, Any]] mapping_props: Dict[str, Any] = {} if es_type_overrides is not None: non_existing_columns: List[str] = [ key for key in es_type_overrides.keys() if key not in dataframe.columns ] if non_existing_columns: raise KeyError( f"{repr(non_existing_columns)[1:-1]} column(s) not in given dataframe" ) for column, dtype in dataframe.dtypes.iteritems(): if es_type_overrides is not None and column in es_type_overrides: es_dtype = es_type_overrides[column] if es_dtype == "text": es_dtype = { "type": "text", "fields": {"keyword": {"type": "keyword"}}, } else: es_dtype = FieldMappings._pd_dtype_to_es_dtype(dtype) if isinstance(es_dtype, str): mapping_props[column] = {"type": es_dtype} else: mapping_props[column] = es_dtype return {"mappings": {"properties": mapping_props}} def aggregatable_field_name(self, display_name: str) -> Optional[str]: mapping: Optional[pd.Series] = None try: mapping = self._mappings_capabilities.loc[display_name] except KeyError: raise KeyError( f"Can not get aggregatable field name for invalid display name {display_name}" ) from None if mapping is not None and mapping.aggregatable_es_field_name is None: warnings.warn(f"Aggregations not supported for '{display_name}'") return mapping.aggregatable_es_field_name def aggregatable_field_names(self) -> Dict[str, str]: non_aggregatables = self._mappings_capabilities[ self._mappings_capabilities.aggregatable_es_field_name.isna() ] if not non_aggregatables.empty: warnings.warn(f"Aggregations not supported for '{non_aggregatables}'") aggregatables = self._mappings_capabilities[ self._mappings_capabilities.aggregatable_es_field_name.notna() ] return dict( aggregatables[["aggregatable_es_field_name", "es_field_name"]].to_dict( orient="split" )["data"] ) def date_field_format(self, es_field_name: str) -> str: return self._mappings_capabilities.loc[ self._mappings_capabilities.es_field_name == es_field_name ].es_date_format.squeeze()
Apache License 2.0
pactools/pactools
pactools/dar_model/base_dar.py
BaseDAR.bic
python
def bic(self): return self._compute_criterion()['bic']
Bayesian information criterion (BIC) of the model
https://github.com/pactools/pactools/blob/1e95893bdfedf6e646749cf380c3815d4165bd9a/pactools/dar_model/base_dar.py#L446-L448
from abc import ABCMeta, abstractmethod import warnings import numpy as np import matplotlib.pyplot as plt from scipy import linalg from scipy.signal import fftconvolve from scipy import stats from ..utils.progress_bar import ProgressBar from ..utils.maths import squared_norm from ..utils.validation import check_array, check_consistent_shape from ..utils.validation import check_is_fitted from ..utils.spectrum import Spectrum from ..utils.viz import add_colorbar, compute_vmin_vmax, phase_string EPSILON = np.finfo(np.float32).eps class BaseDAR(object): __metaclass__ = ABCMeta def __init__(self, ordar=1, ordriv=0, criterion=None, normalize=True, ortho=True, center=True, iter_gain=10, eps_gain=1.0e-4, progress_bar=False, use_driver_phase=False, max_ordar=None, warn_gain_estimation_failure=False): self.ordar = ordar self.criterion = criterion self.normalize = normalize self.ortho = ortho self.center = center self.iter_gain = iter_gain self.eps_gain = eps_gain self.progress_bar = progress_bar self.use_driver_phase = use_driver_phase self.warn_gain_estimation_failure = warn_gain_estimation_failure self.max_ordar = max_ordar self.ordriv = ordriv self._compute_cross_orders(ordriv) self.train_weights = None self.test_weights = None self.basis_ = None def _check_all_arrays(self, sigin, sigdriv, sigdriv_imag, train_weights, test_weights): sigin = check_array(sigin) sigdriv = check_array(sigdriv) sigdriv_imag = check_array(sigdriv_imag, accept_none=True) check_consistent_shape(sigin, sigdriv, sigdriv_imag) check_consistent_shape(sigdriv, sigdriv_imag) train_weights = check_array(train_weights, accept_none=True) test_weights = check_array(test_weights, accept_none=True) if train_weights is not None: self.train_mask_ = train_weights == 0 check_consistent_shape(sigdriv, train_weights) if test_weights is None: mask = train_weights == 0 else: mask = np.logical_and(train_weights == 0, test_weights == 0) train_weights, test_weights, sigin, sigdriv, sigdriv_imag = self._remove_far_masked_data( mask, [train_weights, test_weights, sigin, sigdriv, sigdriv_imag]) self.train_mask_ = train_weights == 0 if test_weights is not None: self.test_mask_ = test_weights == 0 else: self.test_mask_ = None check_consistent_shape(sigdriv, train_weights) else: self.train_mask_ = None self.test_mask_ = None if self.use_driver_phase and self.ordriv > 0: if sigdriv_imag is None: raise ValueError('Impossible to use use_driver_phase=True ' 'without giving sigdriv_imag.') amplitude = np.sqrt(sigdriv ** 2 + sigdriv_imag ** 2) sigdriv = sigdriv / amplitude sigdriv_imag = sigdriv_imag / amplitude if self.center: sigin = sigin - np.mean(sigin) self.sigin = sigin self.sigdriv = sigdriv self.sigdriv_imag = sigdriv_imag self.train_weights = train_weights self.test_weights = test_weights def fit(self, sigin, sigdriv, fs, sigdriv_imag=None, train_weights=None, test_weights=None): self._reset_criterions() self._check_all_arrays(sigin, sigdriv, sigdriv_imag, train_weights, test_weights) self.fs = fs if self.ordar < 0: raise ValueError('self.ordar is negative') if self.ordriv < 0: raise ValueError('self.ordriv is negative') self.AR_ = np.ndarray(0) self.G_ = np.ndarray(0) if self.criterion: self._order_selection() self._estimate_error(recompute=True) else: self._fit() self._estimate_fit_std() return self def _fit(self): self.ordriv_ = self.ordriv self._make_basis() self._estimate_ar() self._estimate_error(recompute=self.test_weights is not None) self._estimate_gain() def _compute_cross_orders(self, ordriv): power_list_re, power_list_im = [], [] 
for j in np.arange(ordriv + 1): for k in np.arange(j + 1): power_list_re.append(j - k) power_list_im.append(k) self.n_basis = len(power_list_re) return power_list_re, power_list_im, self.n_basis def _make_basis(self, sigdriv=None, sigdriv_imag=None, ordriv=None): if sigdriv is None: sigdriv = self.sigdriv sigdriv_imag = self.sigdriv_imag save_basis = True ortho, normalize = self.ortho, self.normalize else: sigdriv = check_array(sigdriv) sigdriv_imag = check_array(sigdriv_imag, accept_none=True) save_basis = False ortho, normalize = False, False n_epochs, n_points = sigdriv.shape if ordriv is None: ordriv = self.ordriv if sigdriv_imag is None: power_list_re = np.arange(ordriv + 1) power_list_im = np.zeros(ordriv + 1) n_basis = ordriv + 1 self.n_basis = ordriv + 1 else: power_list_re, power_list_im, n_basis = self._compute_cross_orders( ordriv) alpha = np.eye(n_basis) basis = np.zeros((n_basis, n_epochs * n_points)) for k, (power_real, power_imag) in enumerate(zip(power_list_re, power_list_im)): if power_imag == 0 and power_real == 0: basis[k] = 1.0 elif power_imag > 0 and power_real == 0: basis[k] = sigdriv_imag.ravel() ** power_imag elif power_imag == 0 and power_real > 0: basis[k] = sigdriv.ravel() ** power_real elif power_imag > 0 and power_real > 0: basis[k] = sigdriv_imag.ravel() ** power_imag basis[k] *= sigdriv.ravel() ** power_real else: raise ValueError('Power cannot be negative : (%s, %s)' % (power_real, power_imag)) if ortho: for m in range(k): alpha[k, m] = -(np.sum(basis[k] * basis[m]) / np.sum( basis[m] * basis[m])) basis[k] += np.dot(alpha[k, :k], basis[:k, :]) alpha[k, :k] = np.dot(alpha[k, :k], alpha[:k, :k]) if normalize: scale = np.sqrt( float(n_epochs * n_points) / squared_norm(basis[k])) basis[k] *= scale alpha[k, :k + 1] *= scale if save_basis: self.basis_ = basis.reshape(-1, n_epochs, n_points) self.alpha_ = alpha else: if self.normalize or self.ortho: basis = np.dot(self.alpha_, basis) return basis.reshape(-1, n_epochs, n_points) def _estimate_ar(self): if self.progress_bar: bar = ProgressBar(title=self.get_title(name=True)) for AR_ in self._last_model(): self.AR_ = AR_ if self.progress_bar: bar.update( float(self.ordar_) / self.ordar, title=self.get_title( name=True)) def _remove_far_masked_data(self, mask, list_signals): if mask is None: return list_signals selection = ~mask kernel = np.ones(self.ordar * 2 + 1) kernel[-self.ordar:] = 0. delayed_selection = fftconvolve(selection, kernel[None, :], mode='same') delayed_selection[np.abs(delayed_selection) < 1e-13] = 0. 
time_selection = delayed_selection.sum(axis=0) != 0 epoch_selection = delayed_selection.sum(axis=1) != 0 if not np.any(time_selection) or not np.any(epoch_selection): raise ValueError("The mask seems to hide everything.") output_signals = [] for sig in list_signals: if sig is not None: sig = sig[..., epoch_selection, :] sig = sig[..., :, time_selection] output_signals.append(sig) return output_signals def _get_train_data(self, sig_list): if not isinstance(sig_list, list): sig_list = list(sig_list) train_weights = self.train_weights sig_list.append(train_weights) sig_list = self._remove_far_masked_data(self.train_mask_, sig_list) return sig_list def _get_test_data(self, sig_list): if not isinstance(sig_list, list): sig_list = list(sig_list) test_weights = self.test_weights if test_weights is None: test_weights = self.train_weights sig_list.append(test_weights) sig_list = self._remove_far_masked_data(self.test_mask_, sig_list) return sig_list def degrees_of_freedom(self): return ((self.ordar_ + 1) * self.n_basis) def get_title(self, name=True, criterion=None): title = '' if name: title += self.__class__.__name__ if hasattr(self, 'AR_'): ordar_ = self.ordar_ ordriv_ = self.ordriv_ title += '(%d, %d)' % (ordar_, ordriv_) if criterion is not None and criterion is not False: title += '_%s=%.4f' % (criterion, self.get_criterion(criterion)) else: ordar = self.ordar ordriv = self.ordriv title += '(%d, %d)' % (ordar, ordriv) return title def get_criterion(self, criterion, train=False): criterion = criterion.lower() try: value = self._compute_criterion(train=train)[criterion] except KeyError: raise KeyError('Wrong criterion: %s' % criterion) return value def _compute_criterion(self, train=False): check_is_fitted(self, 'AR_') criterions = getattr(self, 'criterions_', None) if criterions is not None: return criterions max_ordar = self.max_ordar if self.max_ordar is None: max_ordar = self.ordar logl, tmax = self._estimate_log_likelihood(train=train, skip=max_ordar) degrees = self.degrees_of_freedom() eta_aic = 2.0 aic = -2.0 * logl + eta_aic * degrees eta_bic = np.log(tmax) bic = -2.0 * logl + eta_bic * degrees self.criterions_ = { 'aic': aic / tmax, 'bic': bic / tmax, 'logl': logl / tmax, '-logl': -logl / tmax, 'tmax': tmax } return self.criterions_ @property def logl(self): return self._compute_criterion()['logl'] @property def aic(self): return self._compute_criterion()['aic'] @property
BSD 3-Clause New or Revised License
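A minimal usage sketch for the bic property above. BaseDAR itself is abstract, so this assumes a concrete DAR subclass is importable from pactools.dar_model; the signals below are random placeholders.

import numpy as np
from pactools.dar_model import DAR

fs = 250.0                               # sampling frequency (Hz)
sigin = np.random.randn(1, 2000)         # signal to be modeled
sigdriv = np.random.randn(1, 2000)       # driver signal
model = DAR(ordar=10, ordriv=1)
model.fit(sigin=sigin, sigdriv=sigdriv, fs=fs)
print(model.bic)                         # per-sample BIC, i.e. _compute_criterion()['bic']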
lttm/gmnet
Deeplab/research/deeplab/core/resnet_v1_beta.py
root_block_fn_for_beta_variant
python
def root_block_fn_for_beta_variant(net, depth_multiplier=1.0): net = conv2d_ws.conv2d_same( net, int(64 * depth_multiplier), 3, stride=2, scope='conv1_1') net = conv2d_ws.conv2d_same( net, int(64 * depth_multiplier), 3, stride=1, scope='conv1_2') net = conv2d_ws.conv2d_same( net, int(128 * depth_multiplier), 3, stride=1, scope='conv1_3') return net
Gets root_block_fn for beta variant. ResNet-v1 beta variant modifies the first original 7x7 convolution to three 3x3 convolutions. Args: net: A tensor of size [batch, height, width, channels], input to the model. depth_multiplier: Controls the number of convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_out * depth_multiplier`. Returns: A tensor after three 3x3 convolutions.
https://github.com/lttm/gmnet/blob/e17959eb219e1884e2be271c9244ba284c2f4ffa/Deeplab/research/deeplab/core/resnet_v1_beta.py#L153-L175
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from six.moves import range import tensorflow as tf from tensorflow.contrib import slim as contrib_slim from research.deeplab.core import conv2d_ws from research.deeplab.core import utils from tensorflow.contrib.slim.nets import resnet_utils slim = contrib_slim _DEFAULT_MULTI_GRID = [1, 1, 1] _DEFAULT_MULTI_GRID_RESNET_18 = [1, 1] @slim.add_arg_scope def bottleneck(inputs, depth, depth_bottleneck, stride, unit_rate=1, rate=1, outputs_collections=None, scope=None): with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc: depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) if depth == depth_in: shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') else: shortcut = conv2d_ws.conv2d( inputs, depth, [1, 1], stride=stride, activation_fn=None, scope='shortcut') residual = conv2d_ws.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1') residual = conv2d_ws.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate * unit_rate, scope='conv2') residual = conv2d_ws.conv2d(residual, depth, [1, 1], stride=1, activation_fn=None, scope='conv3') output = tf.nn.relu(shortcut + residual) return slim.utils.collect_named_outputs(outputs_collections, sc.name, output) @slim.add_arg_scope def lite_bottleneck(inputs, depth, stride, unit_rate=1, rate=1, outputs_collections=None, scope=None): with tf.variable_scope(scope, 'lite_bottleneck_v1', [inputs]) as sc: depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) if depth == depth_in: shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') else: shortcut = conv2d_ws.conv2d( inputs, depth, [1, 1], stride=stride, activation_fn=None, scope='shortcut') residual = conv2d_ws.conv2d_same( inputs, depth, 3, 1, rate=rate * unit_rate, scope='conv1') with slim.arg_scope([conv2d_ws.conv2d], activation_fn=None): residual = conv2d_ws.conv2d_same( residual, depth, 3, stride, rate=rate * unit_rate, scope='conv2') output = tf.nn.relu(shortcut + residual) return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
Apache License 2.0
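A short TF1-style sketch of calling the root block above on an input batch, assuming the repository's research.deeplab package is on the Python path; the input shape is illustrative.

import tensorflow as tf
from research.deeplab.core.resnet_v1_beta import root_block_fn_for_beta_variant

# Placeholder input: [batch, height, width, channels]
images = tf.placeholder(tf.float32, [None, 513, 513, 3])
net = root_block_fn_for_beta_variant(images, depth_multiplier=1.0)
# net is downsampled by the stride-2 first conv and ends with 128 output channels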
sally20921/all4depth
all4depth/loggers/wandb_logger.py
prep_image
python
def prep_image(prefix, key, image): if is_tensor(image): image = image.detach().permute(1, 2, 0).cpu().numpy() prefix_key = '{}-{}'.format(prefix, key) return {prefix_key: wandb.Image(image, caption=key)}
Prepare image for wandb logging Parameters ---------- prefix : str Prefix added to the key for logging key : str Key from data containing the inverse depth map image : torch.Tensor [3,H,W] Image to be logged Returns ------- output : dict Dictionary with key and value for logging
https://github.com/sally20921/all4depth/blob/ef058839e16b277b4ffa6a890d03cd90b6c36283/all4depth/loggers/wandb_logger.py#L252-L273
from argparse import Namespace from collections import OrderedDict import numpy as np import torch.nn as nn import wandb from wandb.wandb_run import Run from all4depth.utils.depth import viz_inv_depth from all4depth.utils.logging import prepare_dataset_prefix from all4depth.utils.types import is_dict, is_tensor class WandbLogger: def __init__(self, name=None, dir=None, id=None, anonymous=False, version=None, project=None, entity=None, tags=None, log_model=False, experiment=None ): super().__init__() self._name = name self._dir = dir self._anonymous = 'allow' if anonymous else None self._id = version or id self._tags = tags self._project = project self._entity = entity self._log_model = log_model self._experiment = experiment if experiment else self.create_experiment() self._metrics = OrderedDict() def __getstate__(self): state = self.__dict__.copy() state['_id'] = self._experiment.id if self._experiment is not None else None state['_experiment'] = None return state def create_experiment(self): experiment = wandb.init( name=self._name, dir=self._dir, project=self._project, anonymous=self._anonymous, reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity ) wandb.run.save() return experiment def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100): self.experiment.watch(model, log=log, log_freq=log_freq) @property def experiment(self) -> Run: if self._experiment is None: self._experiment = self.create_experiment() return self._experiment @property def version(self) -> str: return self._experiment.id if self._experiment else None @property def name(self) -> str: name = self._experiment.project_name() if self._experiment else None return name @property def run_name(self) -> str: return wandb.run.name if self._experiment else None @property def run_url(self) -> str: return 'https://app.wandb.ai/{}/{}/runs/{}'.format( wandb.run.entity, wandb.run.project, wandb.run.id) if self._experiment else None @staticmethod def _convert_params(params): if isinstance(params, Namespace): params = vars(params) if params is None: params = {} return params def log_config(self, params): params = self._convert_params(params) self.experiment.config.update(params, allow_val_change=True) def log_metrics(self, metrics): self._metrics.update(metrics) if 'global_step' in metrics: self.experiment.log(self._metrics) self._metrics.clear() def log_images(self, func, mode, batch, output, args, dataset, world_size, config): dataset_idx = 0 if len(args) == 1 else args[1] prefix = prepare_dataset_prefix(config, dataset_idx) interval = len(dataset[dataset_idx]) // world_size // config.num_logs if args[0] % interval == 0: prefix_idx = '{}-{}-{}'.format(mode, prefix, batch['idx'][0].item()) func(prefix_idx, batch, output) def log_depth(self, *args, **kwargs): def log(prefix_idx, batch, output): self._metrics.update(log_rgb('rgb', prefix_idx, batch)) self._metrics.update(log_inv_depth('inv_depth', prefix_idx, output)) if 'depth' in batch: self._metrics.update(log_depth('depth', prefix_idx, batch)) self.log_images(log, *args, **kwargs) def log_rgb(key, prefix, batch, i=0): rgb = batch[key] if is_dict(batch) else batch return prep_image(prefix, key, rgb[i]) def log_depth(key, prefix, batch, i=0): depth = batch[key] if is_dict(batch) else batch inv_depth = 1. 
/ depth[i] inv_depth[depth[i] == 0] = 0 return prep_image(prefix, key, viz_inv_depth(inv_depth, filter_zeros=True)) def log_inv_depth(key, prefix, batch, i=0): inv_depth = batch[key] if is_dict(batch) else batch return prep_image(prefix, key, viz_inv_depth(inv_depth[i]))
MIT License
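A minimal sketch of using prep_image above to push one image into an active wandb run; the project name and tensor are placeholders.

import torch
import wandb
from all4depth.loggers.wandb_logger import prep_image

wandb.init(project="demo")                  # placeholder project
image = torch.rand(3, 192, 640)             # [3, H, W] RGB tensor
metrics = prep_image("train-rgb", "rgb", image)
wandb.log(metrics)                          # logged under key 'train-rgb-rgb'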
catalyst-cooperative/pudl
src/pudl/glue/ferc1_eia.py
get_lost_utils_eia
python
def get_lost_utils_eia(pudl_engine): db_utils_eia = get_db_utils_eia(pudl_engine) mapped_utils_eia = get_mapped_utils_eia() lost_utils_idx = mapped_utils_eia.index.difference(db_utils_eia.index) lost_utils_eia = mapped_utils_eia.loc[lost_utils_idx] return lost_utils_eia
Get a list of all mapped EIA Utilities not found in the PUDL DB.
https://github.com/catalyst-cooperative/pudl/blob/6a75069b90219a2da55262737b92fe0a024c4fb8/src/pudl/glue/ferc1_eia.py#L565-L571
import importlib import logging import pandas as pd import sqlalchemy as sa import pudl from pudl import constants as pc logger = logging.getLogger(__name__) def get_plant_map(): map_eia_ferc_file = importlib.resources.open_binary( 'pudl.package_data.glue', 'mapping_eia923_ferc1.xlsx') return pd.read_excel( map_eia_ferc_file, 'plants_output', na_values='', keep_default_na=False, converters={ 'plant_id_pudl': int, 'plant_name_pudl': str, 'utility_id_ferc1': int, 'utility_name_ferc1': str, 'plant_name_ferc1': str, 'plant_id_eia': int, 'plant_name_eia': str, 'utility_name_eia': str, 'utility_id_eia': int }, ) def get_utility_map(): map_eia_ferc_file = importlib.resources.open_binary( 'pudl.package_data.glue', 'mapping_eia923_ferc1.xlsx') return pd.read_excel( map_eia_ferc_file, 'utilities_output', na_values='', keep_default_na=False, converters={ 'utility_id_pudl': int, 'utility_name_pudl': str, 'utility_id_ferc1': int, 'utility_name_ferc1': str, 'utility_id_eia': int, 'utility_name_eia': str }, ) def get_db_plants_ferc1(pudl_settings, years): for yr in years: if yr not in pc.data_years['ferc1']: raise ValueError( f"Input year {yr} is not available in the FERC data.") ferc1_engine = sa.create_engine(pudl_settings["ferc1_db"]) ferc1_tables = pudl.output.pudltabl.get_table_meta(ferc1_engine) respondent_table = ferc1_tables['f1_respondent_id'] plant_tables = ['f1_steam', 'f1_gnrt_plant', 'f1_hydro', 'f1_pumped_storage'] capacity_cols = {'f1_steam': 'tot_capacity', 'f1_gnrt_plant': 'capacity_rating', 'f1_hydro': 'tot_capacity', 'f1_pumped_storage': 'tot_capacity'} all_plants = pd.DataFrame() for tbl in plant_tables: plant_select = sa.sql.select( ferc1_tables[tbl].c.respondent_id, ferc1_tables[tbl].c.plant_name, ferc1_tables[tbl].columns[capacity_cols[tbl]], respondent_table.c.respondent_name ).distinct().where( sa.and_( ferc1_tables[tbl].c.respondent_id == respondent_table.c.respondent_id, ferc1_tables[tbl].c.plant_name != '', ferc1_tables[tbl].c.report_year.in_(years) ) ) all_plants = all_plants.append( pd.read_sql(plant_select, ferc1_engine). rename(columns={"respondent_id": "utility_id_ferc1", "respondent_name": "utility_name_ferc1", "plant_name": "plant_name_ferc1", capacity_cols[tbl]: "capacity_mw"}). pipe(pudl.helpers.simplify_strings, columns=["plant_name_ferc1", "utility_name_ferc1"]). assign(plant_table=tbl). loc[:, ["utility_id_ferc1", "utility_name_ferc1", "plant_name_ferc1", "capacity_mw", "plant_table"]] ) all_plants = ( all_plants.drop_duplicates(["utility_id_ferc1", "plant_name_ferc1"]). sort_values(["utility_id_ferc1", "plant_name_ferc1"]) ) return all_plants def get_mapped_plants_ferc1(): ferc1_mapped_plants = ( pudl.glue.ferc1_eia.get_plant_map(). loc[:, ["utility_id_ferc1", "utility_name_ferc1", "plant_name_ferc1"]]. dropna(subset=["utility_id_ferc1"]). pipe(pudl.helpers.simplify_strings, columns=["utility_id_ferc1", "utility_name_ferc1", "plant_name_ferc1"]). astype({"utility_id_ferc1": int}). drop_duplicates(["utility_id_ferc1", "plant_name_ferc1"]). 
sort_values(["utility_id_ferc1", "plant_name_ferc1"]) ) return ferc1_mapped_plants def get_mapped_utils_ferc1(): ferc1_mapped_utils = ( pudl.glue.ferc1_eia.get_utility_map() .loc[:, ["utility_id_ferc1", "utility_name_ferc1"]] .dropna(subset=["utility_id_ferc1"]) .pipe(pudl.helpers.simplify_strings, columns=["utility_id_ferc1", "utility_name_ferc1"]) .drop_duplicates(["utility_id_ferc1", "utility_name_ferc1"]) .astype({"utility_id_ferc1": int, "utility_name_ferc1": str}) .sort_values(["utility_id_ferc1"]) ) return ferc1_mapped_utils def get_unmapped_plants_ferc1(pudl_settings, years): db_plants = ( get_db_plants_ferc1(pudl_settings, years). set_index(["utility_id_ferc1", "plant_name_ferc1"]) ) mapped_plants = ( get_mapped_plants_ferc1(). set_index(["utility_id_ferc1", "plant_name_ferc1"]) ) new_plants_index = db_plants.index.difference(mapped_plants.index) unmapped_plants = db_plants.loc[new_plants_index].reset_index() return unmapped_plants def get_unmapped_utils_ferc1(ferc1_engine): all_utils_ferc1 = ( pd.read_sql_table("f1_respondent_id", ferc1_engine) .rename(columns={ "respondent_id": "utility_id_ferc1", "respondent_name": "utility_name_ferc1", }) .pipe(pudl.helpers.simplify_strings, ["utility_name_ferc1"]) .set_index(["utility_id_ferc1", "utility_name_ferc1"]) ) mapped_utils_ferc1 = ( get_mapped_utils_ferc1() .pipe(pudl.helpers.simplify_strings, ["utility_name_ferc1"]) .set_index(["utility_id_ferc1", "utility_name_ferc1"]) ) unmapped_utils_ferc1 = ( all_utils_ferc1.loc[ all_utils_ferc1.index .difference(mapped_utils_ferc1.index) ] .reset_index() .loc[:, ["utility_id_ferc1", "utility_name_ferc1"]] ) return unmapped_utils_ferc1 def get_db_plants_eia(pudl_engine): db_plants_eia = ( pd.read_sql("plants_entity_eia", pudl_engine). loc[:, ["plant_id_eia", "plant_name_eia", "state"]]. pipe(pudl.helpers.simplify_strings, columns=["plant_name_eia"]). astype({"plant_id_eia": int}). drop_duplicates("plant_id_eia"). sort_values("plant_id_eia") ) return db_plants_eia def get_mapped_plants_eia(): mapped_plants_eia = ( pudl.glue.ferc1_eia.get_plant_map(). loc[:, ["plant_id_eia", "plant_name_eia"]]. dropna(subset=["plant_id_eia"]). pipe(pudl.helpers.simplify_strings, columns=["plant_name_eia"]). astype({"plant_id_eia": int}). drop_duplicates("plant_id_eia"). sort_values("plant_id_eia") ) return mapped_plants_eia def get_unmapped_plants_eia(pudl_engine): plants_utils_eia = ( pd.read_sql("""SELECT DISTINCT plant_id_eia, utility_id_eia FROM plants_eia860;""", pudl_engine). dropna(). astype({"plant_id_eia": int, "utility_id_eia": int}). drop_duplicates(). merge(get_db_utils_eia(pudl_engine).reset_index(), on="utility_id_eia") ) plant_capacity_mw = ( pd.read_sql("SELECT * FROM generators_eia860;", pudl_engine). groupby(["plant_id_eia"])[["capacity_mw"]].agg(sum). reset_index() ) db_plants_eia = get_db_plants_eia(pudl_engine).set_index("plant_id_eia") mapped_plants_eia = get_mapped_plants_eia().set_index("plant_id_eia") unmapped_plants_idx = ( db_plants_eia.index. difference(mapped_plants_eia.index) ) unmapped_plants_eia = ( db_plants_eia.loc[unmapped_plants_idx]. merge(plants_utils_eia, how="left", on="plant_id_eia"). merge(plant_capacity_mw, how="left", on="plant_id_eia"). loc[:, ["plant_id_eia", "plant_name_eia", "utility_id_eia", "utility_name_eia", "state", "capacity_mw"]]. 
astype({"utility_id_eia": "Int32"}) ) return unmapped_plants_eia def get_lost_plants_eia(pudl_engine): mapped_plants_eia = get_mapped_plants_eia().set_index("plant_id_eia") db_plants_eia = get_db_plants_eia(pudl_engine).set_index("plant_id_eia") lost_plants_idx = mapped_plants_eia.index.difference(db_plants_eia.index) lost_plants_eia = mapped_plants_eia.loc[lost_plants_idx] return lost_plants_eia def get_db_utils_eia(pudl_engine): db_utils_eia = ( pd.read_sql("utilities_entity_eia", pudl_engine). loc[:, ["utility_id_eia", "utility_name_eia"]]. pipe(pudl.helpers.simplify_strings, columns=["utility_name_eia"]). astype({"utility_id_eia": int}). drop_duplicates("utility_id_eia"). sort_values("utility_id_eia"). set_index("utility_id_eia") ) return db_utils_eia def get_utility_most_recent_capacity(pudl_engine): query = """ select utility_id_eia, capacity_mw, report_date from generators_eia860 """ with pudl_engine.connect() as conn: generator_capacities = pd.read_sql(query, conn, parse_dates=["report_date"]) generator_capacities['utility_id_eia'] = generator_capacities['utility_id_eia'].astype( "Int64") most_recent_generators_idx = generator_capacities.groupby( "utility_id_eia")["report_date"].transform(max) == generator_capacities["report_date"] most_recent_generators = generator_capacities[most_recent_generators_idx] utility_capacities = most_recent_generators.groupby("utility_id_eia").sum() utility_capacities = utility_capacities.rename( columns={"capacity_mw": "most_recent_total_capacity_mw"}) return utility_capacities def get_mapped_utils_eia(): mapped_utils_eia = ( pudl.glue.ferc1_eia.get_utility_map(). loc[:, ["utility_id_eia", "utility_name_eia"]]. dropna(subset=["utility_id_eia"]). pipe(pudl.helpers.simplify_strings, columns=["utility_name_eia"]). astype({"utility_id_eia": int}). drop_duplicates(["utility_id_eia"]). sort_values(["utility_id_eia"]). 
set_index("utility_id_eia") ) return mapped_utils_eia def get_unmapped_utils_eia(pudl_engine): db_utils_eia = get_db_utils_eia(pudl_engine) mapped_utils_eia = get_mapped_utils_eia() unmapped_utils_idx = db_utils_eia.index.difference(mapped_utils_eia.index) unmapped_utils_eia = db_utils_eia.loc[unmapped_utils_idx] utils_recent_capacity = get_utility_most_recent_capacity(pudl_engine) unmapped_utils_eia = unmapped_utils_eia.merge( utils_recent_capacity, on="utility_id_eia", how="left", validate="1:1") unmapped_utils_eia = unmapped_utils_eia.sort_values( by="most_recent_total_capacity_mw", ascending=False) return unmapped_utils_eia def get_unmapped_utils_with_plants_eia(pudl_engine): pudl_out = pudl.output.pudltabl.PudlTabl(pudl_engine) utils_idx = ["utility_id_eia", "report_date"] plants_idx = ["plant_id_eia", "report_date"] own_idx = ["plant_id_eia", "generator_id", "owner_utility_id_eia", "report_date"] utils_eia860 = ( pudl_out.utils_eia860() .dropna(subset=utils_idx) .set_index(utils_idx) ) plants_eia860 = ( pudl_out.plants_eia860() .dropna(subset=plants_idx) .set_index(plants_idx) ) own_eia860 = ( pudl_out.own_eia860() .dropna(subset=own_idx) .set_index(own_idx) ) own_miss_utils = set( own_eia860[own_eia860.utility_id_pudl.isnull()] .utility_id_eia.unique() ) plants_miss_utils = set( plants_eia860[plants_eia860.utility_id_pudl.isnull()] .utility_id_eia.unique() ) utils_eia860 = utils_eia860.reset_index() miss_utils = utils_eia860[ (utils_eia860.utility_id_pudl.isna()) & ( (utils_eia860.plants_reported_owner == "True") | (utils_eia860.plants_reported_asset_manager == "True") | (utils_eia860.plants_reported_operator == "True") | (utils_eia860.plants_reported_other_relationship == "True") | (utils_eia860.utility_id_eia.isin(own_miss_utils)) | (utils_eia860.utility_id_eia.isin(plants_miss_utils)) ) ] miss_utils = ( miss_utils.drop_duplicates("utility_id_eia") .set_index("utility_id_eia") .loc[:, ["utility_name_eia"]] ) utils_recent_capacity = get_utility_most_recent_capacity(pudl_engine) miss_utils = miss_utils.merge( utils_recent_capacity, on="utility_id_eia", how="left", validate="1:1") miss_utils = miss_utils.sort_values( by="most_recent_total_capacity_mw", ascending=False) return miss_utils
MIT License
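A usage sketch for get_lost_utils_eia above; the SQLite URL is illustrative and must point at an existing PUDL database.

import sqlalchemy as sa
from pudl.glue.ferc1_eia import get_lost_utils_eia

pudl_engine = sa.create_engine("sqlite:////path/to/pudl.sqlite")  # placeholder path
lost_utils = get_lost_utils_eia(pudl_engine)
print(lost_utils)   # mapped EIA utilities that are missing from the PUDL DB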
tell-k/goolabs
goolabs/commands.py
morph
python
def morph(ctx, app_id, sentence_file, json_flag, sentence, info_filter, pos_filter, request_id): app_id = clean_app_id(app_id) sentence = clean_sentence(sentence, sentence_file) if info_filter: info_filter = info_filter.replace(',', '|') if pos_filter: pos_filter = pos_filter.replace(',', '|') api = GoolabsAPI(app_id) ret = api.morph( sentence=sentence, info_filter=info_filter, pos_filter=pos_filter, request_id=request_id, ) if json_flag: click.echo(format_json(api.response.json())) return for words in ret['word_list']: for word in words: click.echo(','.join(word))
Morphological analysis for Japanese.
https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L107-L135
from __future__ import division, print_function, absolute_import import json import locale import click import six import goolabs from goolabs import GoolabsAPI if 0: from typing import Optional, IO, List, Dict, Any from click.core import Context def text(s): if isinstance(s, six.binary_type): return s.decode(locale.getpreferredencoding()) return s def clean_app_id(app_id): if not app_id: raise click.UsageError('Missing option "--app-id" / "-a" ' 'or GOOLABS_APP_ID enviroment value.') return app_id def clean_sentence(sentence, sentence_file): if not sentence and not sentence_file: raise click.UsageError('Missing sentence. You must set' ' SENTENCE argument or --file option.') if not sentence and sentence_file: sentence = text(sentence_file.read()) return sentence def clean_review(review, review_file): if not review and not review_file: raise click.UsageError('Missing review. You must set' ' REVIEW argument or --file option.') if not review and review_file: review = text(review_file.read()) return review.split('\n') def clean_body(body, body_file): if not body and not body_file: raise click.UsageError('Missing body. You must set' ' BODY argument or --file option.') if not body and body_file: body = text(body_file.read()) return body def clean_length(length): if length is None: return None try: return int(length) except ValueError: raise click.UsageError( '--length is not Integer. You must choice length from 60/120/180.' ) def format_json(json_data): return json.dumps(json_data, indent=2, ensure_ascii=False) @click.group() @click.pass_context @click.version_option(version=goolabs.__version__) def main(ctx): @main.command() @click.argument('sentence', required=False, type=text) @click.option('--app-id', '-a', 'app_id', envvar='GOOLABS_APP_ID', type=text) @click.option('--request-id', '-r', 'request_id', type=text) @click.option('--info-filter', '-i', 'info_filter', type=text, help='form,pos,read') @click.option('--pos-filter', '-p', 'pos_filter', type=text, help=u'名刺,動詞活用語尾,句点..etc') @click.option('--file', '-f', 'sentence_file', type=click.File('rb')) @click.option('--json/--no-json', '-j', 'json_flag', default=False) @click.pass_context
MIT License
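The click command above is a thin wrapper around GoolabsAPI.morph; this is a sketch of the equivalent library call. The app id is a placeholder, and the comma-separated filter is written pipe-separated, exactly as the command converts it.

from goolabs import GoolabsAPI

api = GoolabsAPI("your-app-id")                     # placeholder app id
ret = api.morph(sentence=u"日本語を解析する", info_filter="form|pos|read")
for words in ret["word_list"]:
    for word in words:
        print(",".join(word))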
machine-learning-exchange/mlx
api/client/swagger_client/models/api_pipeline_extended.py
ApiPipelineExtended.__init__
python
def __init__(self, id=None, created_at=None, name=None, description=None, parameters=None, status=None, default_version_id=None, namespace=None, annotations=None, featured=None, publish_approved=None): self._id = None self._created_at = None self._name = None self._description = None self._parameters = None self._status = None self._default_version_id = None self._namespace = None self._annotations = None self._featured = None self._publish_approved = None self.discriminator = None if id is not None: self.id = id if created_at is not None: self.created_at = created_at if name is not None: self.name = name if description is not None: self.description = description if parameters is not None: self.parameters = parameters if status is not None: self.status = status if default_version_id is not None: self.default_version_id = default_version_id if namespace is not None: self.namespace = namespace if annotations is not None: self.annotations = annotations if featured is not None: self.featured = featured if publish_approved is not None: self.publish_approved = publish_approved
ApiPipelineExtended - a model defined in Swagger
https://github.com/machine-learning-exchange/mlx/blob/be1503c45538dac1a8188560fbec4a07b2a367bf/api/client/swagger_client/models/api_pipeline_extended.py#L68-L105
import pprint import re import six from swagger_client.models.api_parameter import ApiParameter from swagger_client.models.api_pipeline import ApiPipeline from swagger_client.models.api_pipeline_extension import ApiPipelineExtension class ApiPipelineExtended(ApiPipeline, ApiPipelineExtension): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'str', 'created_at': 'datetime', 'name': 'str', 'description': 'str', 'parameters': 'list[ApiParameter]', 'status': 'str', 'default_version_id': 'str', 'namespace': 'str', 'annotations': 'dict(str, str)', 'featured': 'bool', 'publish_approved': 'bool' } attribute_map = { 'id': 'id', 'created_at': 'created_at', 'name': 'name', 'description': 'description', 'parameters': 'parameters', 'status': 'status', 'default_version_id': 'default_version_id', 'namespace': 'namespace', 'annotations': 'annotations', 'featured': 'featured', 'publish_approved': 'publish_approved' }
Apache License 2.0
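A minimal construction sketch for ApiPipelineExtended above; every field value is illustrative and all arguments are optional, mirroring the __init__ shown.

from swagger_client.models.api_pipeline_extended import ApiPipelineExtended

pipeline = ApiPipelineExtended(
    id="pipeline-123",            # placeholder id
    name="my-pipeline",
    description="example pipeline",
    featured=True,
    publish_approved=False,
)
print(pipeline.name)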
osmr/imgclsmob
keras_/kerascv/models/igcv3.py
igcv3_w3d4
python
def igcv3_w3d4(**kwargs): return get_igcv3(width_scale=0.75, model_name="igcv3_w3d4", **kwargs)
IGCV3-D 0.75x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters.
https://github.com/osmr/imgclsmob/blob/ea5f784eea865ce830f3f97c5c1d1f6491d9cbb2/keras_/kerascv/models/igcv3.py#L230-L242
__all__ = ['igcv3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, channel_shuffle_lambda, is_channels_first, flatten def inv_res_unit(x, in_channels, out_channels, strides, expansion, name="inv_res_unit"): residual = (in_channels == out_channels) and (strides == 1) mid_channels = in_channels * 6 if expansion else in_channels groups = 2 if residual: identity = x x = conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, groups=groups, activation=None, name=name + "/conv1") x = channel_shuffle_lambda( channels=mid_channels, groups=groups, name=name + "/c_shuffle")(x) x = dwconv3x3_block( x=x, in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation="relu6", name=name + "/conv2") x = conv1x1_block( x=x, in_channels=mid_channels, out_channels=out_channels, groups=groups, activation=None, name=name + "/conv3") if residual: x = nn.add([x, identity], name=name + "/add") return x def igcv3(channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), classes=1000): input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = conv3x3_block( x=input, in_channels=in_channels, out_channels=init_block_channels, strides=2, activation="relu6", name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 expansion = (i != 0) or (j != 0) x = inv_res_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, expansion=expansion, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = conv1x1_block( x=x, in_channels=in_channels, out_channels=final_block_channels, activation="relu6", name="features/final_block") in_channels = final_block_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_igcv3(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): init_block_channels = 32 final_block_channels = 1280 layers = [1, 4, 6, 8, 6, 6, 1] downsample = [0, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 32, 64, 96, 160, 320] from functools import reduce channels = reduce( lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), [[]]) if width_scale != 1.0: def make_even(x): return x if (x % 2 == 0) else x + 1 channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels] init_block_channels = make_even(int(init_block_channels * width_scale)) if width_scale > 1.0: final_block_channels = make_even(int(final_block_channels * width_scale)) net = igcv3( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def 
igcv3_w1(**kwargs): return get_igcv3(width_scale=1.0, model_name="igcv3_w1", **kwargs)
MIT License
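A usage sketch for igcv3_w3d4 above, assuming the repository's keras_ directory is on the Python path so that kerascv is importable and a Keras backend is configured.

from kerascv.models.igcv3 import igcv3_w3d4

net = igcv3_w3d4(pretrained=False)   # 0.75x width, no downloaded weights
print(net.in_size, net.classes)      # (224, 224) 1000
net.summary()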
argoproj-labs/argo-client-python
argo/workflows/client/models/v1alpha1_hdfs_artifact.py
V1alpha1HDFSArtifact.krb_realm
python
def krb_realm(self, krb_realm): self._krb_realm = krb_realm
Sets the krb_realm of this V1alpha1HDFSArtifact. KrbRealm is the Kerberos realm used with the Kerberos keytab. It must be set if keytab is used. # noqa: E501 :param krb_realm: The krb_realm of this V1alpha1HDFSArtifact. # noqa: E501 :type: str
https://github.com/argoproj-labs/argo-client-python/blob/993d684cab39a834770b296e028519cec035c7b5/argo/workflows/client/models/v1alpha1_hdfs_artifact.py#L244-L253
import pprint import re import six from argo.workflows.client.configuration import Configuration class V1alpha1HDFSArtifact(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'addresses': 'list[str]', 'force': 'bool', 'hdfs_user': 'str', 'krb_c_cache_secret': 'V1SecretKeySelector', 'krb_config_config_map': 'V1ConfigMapKeySelector', 'krb_keytab_secret': 'V1SecretKeySelector', 'krb_realm': 'str', 'krb_service_principal_name': 'str', 'krb_username': 'str', 'path': 'str' } attribute_map = { 'addresses': 'addresses', 'force': 'force', 'hdfs_user': 'hdfsUser', 'krb_c_cache_secret': 'krbCCacheSecret', 'krb_config_config_map': 'krbConfigConfigMap', 'krb_keytab_secret': 'krbKeytabSecret', 'krb_realm': 'krbRealm', 'krb_service_principal_name': 'krbServicePrincipalName', 'krb_username': 'krbUsername', 'path': 'path' } def __init__(self, addresses=None, force=None, hdfs_user=None, krb_c_cache_secret=None, krb_config_config_map=None, krb_keytab_secret=None, krb_realm=None, krb_service_principal_name=None, krb_username=None, path=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._addresses = None self._force = None self._hdfs_user = None self._krb_c_cache_secret = None self._krb_config_config_map = None self._krb_keytab_secret = None self._krb_realm = None self._krb_service_principal_name = None self._krb_username = None self._path = None self.discriminator = None self.addresses = addresses if force is not None: self.force = force if hdfs_user is not None: self.hdfs_user = hdfs_user if krb_c_cache_secret is not None: self.krb_c_cache_secret = krb_c_cache_secret if krb_config_config_map is not None: self.krb_config_config_map = krb_config_config_map if krb_keytab_secret is not None: self.krb_keytab_secret = krb_keytab_secret if krb_realm is not None: self.krb_realm = krb_realm if krb_service_principal_name is not None: self.krb_service_principal_name = krb_service_principal_name if krb_username is not None: self.krb_username = krb_username self.path = path @property def addresses(self): return self._addresses @addresses.setter def addresses(self, addresses): if self.local_vars_configuration.client_side_validation and addresses is None: raise ValueError("Invalid value for `addresses`, must not be `None`") self._addresses = addresses @property def force(self): return self._force @force.setter def force(self, force): self._force = force @property def hdfs_user(self): return self._hdfs_user @hdfs_user.setter def hdfs_user(self, hdfs_user): self._hdfs_user = hdfs_user @property def krb_c_cache_secret(self): return self._krb_c_cache_secret @krb_c_cache_secret.setter def krb_c_cache_secret(self, krb_c_cache_secret): self._krb_c_cache_secret = krb_c_cache_secret @property def krb_config_config_map(self): return self._krb_config_config_map @krb_config_config_map.setter def krb_config_config_map(self, krb_config_config_map): self._krb_config_config_map = krb_config_config_map @property def krb_keytab_secret(self): return self._krb_keytab_secret @krb_keytab_secret.setter def krb_keytab_secret(self, krb_keytab_secret): self._krb_keytab_secret = krb_keytab_secret @property def krb_realm(self): return self._krb_realm @krb_realm.setter
Apache License 2.0
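A small sketch exercising the krb_realm setter above; addresses and path are the required fields, and all values are placeholders.

from argo.workflows.client.models.v1alpha1_hdfs_artifact import V1alpha1HDFSArtifact

artifact = V1alpha1HDFSArtifact(
    addresses=["namenode.example.com:8020"],   # required
    path="/artifacts/output.tgz",              # required
    krb_realm="EXAMPLE.COM",
)
artifact.krb_realm = "OTHER.EXAMPLE.COM"       # goes through the property setter shown above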
lightly-ai/lightly
lightly/openapi_generated/swagger_client/api/datasets_api.py
DatasetsApi.delete_dataset_by_id_with_http_info
python
def delete_dataset_by_id_with_http_info(self, dataset_id, **kwargs): all_params = ['dataset_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_dataset_by_id" % key ) params[key] = val del params['kwargs'] if self.api_client.client_side_validation and ('dataset_id' not in params or params['dataset_id'] is None): raise ValueError("Missing the required parameter `dataset_id` when calling `delete_dataset_by_id`") collection_formats = {} path_params = {} if 'dataset_id' in params: path_params['datasetId'] = params['dataset_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api( '/v1/datasets/{datasetId}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
delete_dataset_by_id # noqa: E501 Delete a specific dataset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_dataset_by_id_with_http_info(dataset_id, async_req=True) >>> result = thread.get() :param async_req bool :param MongoObjectID dataset_id: ObjectId of the dataset (required) :return: None If the method is called asynchronously, returns the request thread.
https://github.com/lightly-ai/lightly/blob/00820e5a60522effb3685a8d792f15e99770ea50/lightly/openapi_generated/swagger_client/api/datasets_api.py#L157-L228
from __future__ import absolute_import import re import six from lightly.openapi_generated.swagger_client.api_client import ApiClient class DatasetsApi(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_dataset(self, body, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_dataset_with_http_info(body, **kwargs) else: (data) = self.create_dataset_with_http_info(body, **kwargs) return data def create_dataset_with_http_info(self, body, **kwargs): all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_dataset" % key ) params[key] = val del params['kwargs'] if self.api_client.client_side_validation and ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_dataset`") collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api( '/v1/datasets', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_dataset_by_id(self, dataset_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_dataset_by_id_with_http_info(dataset_id, **kwargs) else: (data) = self.delete_dataset_by_id_with_http_info(dataset_id, **kwargs) return data
MIT License
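A usage sketch for the delete call above; authentication setup is omitted and the dataset id is a placeholder.

from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi

datasets_api = DatasetsApi()   # builds a default ApiClient; configure host/auth before real use
datasets_api.delete_dataset_by_id_with_http_info("5f7c1a2b3c4d5e6f7a8b9c0d")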
genomicsengland/gelreportmodels
protocols/migration/migration_reports_500_to_reports_400.py
MigrateReports500To400.migrate_interpretation_request_rd
python
def migrate_interpretation_request_rd(self, old_instance, old_ig): new_instance = self.convert_class(self.new_model.InterpretationRequestRD, old_instance) new_instance.versionControl = self.new_model.ReportVersionControl() new_instance.genomeAssemblyVersion = old_instance.genomeAssembly if new_instance.bams is None: new_instance.bams = [] if new_instance.vcfs is None: new_instance.vcfs = [] new_instance.tieredVariants = self.convert_collection(old_ig.variants, self._migrate_reported_variant) new_instance.tieringVersion = old_ig.softwareVersions.get("tiering", "") new_instance.analysisVersion = '' new_instance.analysisReturnUri = '' if old_instance.additionalInfo: new_instance.analysisVersion = old_instance.additionalInfo.get('analysisVersion') or '' new_instance.analysisReturnUri = old_instance.additionalInfo.get('analysisReturnUri', '') new_instance.tieringVersion = old_instance.additionalInfo.get('tieringVersion', '') new_instance.complexGeneticPhenomena = old_instance.additionalInfo.get('complexGeneticPhenomena') new_instance.cellbaseVersion = old_instance.additionalInfo.get('cellbaseVersion', '') new_instance.interpretGenome = bool(distutils.util.strtobool(old_instance.additionalInfo.get('interpretGenome', 'false'))) if not old_instance.pedigree: raise MigrationError("Cannot reverse migrate an Interpretation Request for RD with null pedigree") new_instance.pedigree = MigrateParticipant110To100().migrate_pedigree(old_instance.pedigree) return self.validate_object(object_to_validate=new_instance, object_type=self.new_model.InterpretationRequestRD)
Migrates a reports_5_0_0.InterpretationRequestRD into a reports_4_0_0.InterpretationRequestRD :type old_instance: reports_5_0_0.InterpretationRequestRD :type old_ig: reports_5_0_0.InterpretedGenomeRD :rtype: reports_4_0_0.InterpretationRequestRD
https://github.com/genomicsengland/gelreportmodels/blob/879bf5dd6d16efc274257e1c3f527d6b7459fa45/protocols/migration/migration_reports_500_to_reports_400.py#L58-L90
import logging import distutils.util from protocols import reports_4_0_0 as reports_4_0_0 from protocols import reports_5_0_0 as reports_5_0_0 from protocols.migration.base_migration import BaseMigrateReports400And500 from protocols.migration.base_migration import MigrationError from protocols.migration import MigrateParticipant110To100 class MigrateReports500To400(BaseMigrateReports400And500): old_model = reports_5_0_0 new_model = reports_4_0_0 cip_short_codes = { 'omicia': 'OPA', 'congenica': 'SAP', 'nextcode': 'CSA', 'illumina': 'ILMN', 'genomics_england': 'GEL', 'exomiser': 'EXM' } tier_map = { old_model.Tier.NONE: new_model.Tier.NONE, old_model.Tier.TIER1: new_model.Tier.TIER1, old_model.Tier.TIER2: new_model.Tier.TIER2, old_model.Tier.TIER3: new_model.Tier.TIER3, old_model.Tier.TIER4: new_model.Tier.NONE, old_model.Tier.TIER5: new_model.Tier.NONE, } genotype_map = { old_model.Zygosity.reference_homozygous: new_model.Zygosity.reference_homozygous, old_model.Zygosity.heterozygous: new_model.Zygosity.heterozygous, old_model.Zygosity.alternate_homozygous: new_model.Zygosity.alternate_homozygous, old_model.Zygosity.missing: new_model.Zygosity.missing, old_model.Zygosity.half_missing_reference: new_model.Zygosity.half_missing_reference, old_model.Zygosity.half_missing_alternate: new_model.Zygosity.half_missing_alternate, old_model.Zygosity.alternate_hemizigous: new_model.Zygosity.alternate_hemizigous, old_model.Zygosity.reference_hemizigous: new_model.Zygosity.reference_hemizigous, old_model.Zygosity.unk: new_model.Zygosity.unk, } feature_type_map = { old_model.GenomicEntityType.transcript: new_model.FeatureTypes.Transcript, old_model.GenomicEntityType.regulatory_region: new_model.FeatureTypes.RegulatoryRegion, old_model.GenomicEntityType.gene: new_model.FeatureTypes.Gene, } variant_classification_map = { old_model.ClinicalSignificance.benign: new_model.VariantClassification.benign_variant, old_model.ClinicalSignificance.likely_benign: new_model.VariantClassification.likely_benign_variant, old_model.ClinicalSignificance.VUS: new_model.VariantClassification.variant_of_unknown_clinical_significance, old_model.ClinicalSignificance.uncertain_significance: new_model.VariantClassification.variant_of_unknown_clinical_significance, old_model.ClinicalSignificance.likely_pathogenic: new_model.VariantClassification.likely_pathogenic_variant, old_model.ClinicalSignificance.pathogenic: new_model.VariantClassification.pathogenic_variant, }
Apache License 2.0
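A thin wrapper sketch around the reverse migration above; the two inputs are assumed to be valid reports_5_0_0 objects obtained elsewhere, since building them by hand is out of scope here.

from protocols.migration.migration_reports_500_to_reports_400 import MigrateReports500To400

def to_v400(ir_v500, interpreted_genome_v500):
    """Reverse-migrate a v5.0.0 InterpretationRequestRD (plus its
    InterpretedGenomeRD) into the v4.0.0 model."""
    migrator = MigrateReports500To400()
    return migrator.migrate_interpretation_request_rd(
        old_instance=ir_v500, old_ig=interpreted_genome_v500)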
joonaspu/video-game-behavioural-cloning
record_human_play.py
finish_recording
python
def finish_recording(recording_path, env_name, unique_id, data): trajectory_file = os.path.join( recording_path, "trajectories_pressed_buttons", "{}".format(env_name), "{}.json".format(unique_id) ) with open(trajectory_file, "w") as f: json.dump(data, f)
Store recorded data into a json file
https://github.com/joonaspu/video-game-behavioural-cloning/blob/828aaba3d8d275564f15f809611a3c253cea0298/record_human_play.py#L66-L75
import argparse import time import os import json from video_game_env.connection import Connection parser = argparse.ArgumentParser("""Record humans playing video games. Hotkeys: - Page Up + Q: Quit - Page Up + R: Start recording, or stop and start new recording - Page Up + S: Stop recording """) parser.add_argument("--dont-start-binary", action="store_true", help="Do not start the recorder binary.") parser.add_argument("--binary", default="video_game_env/main", help="Path to the recorder binary.") parser.add_argument("-f", "--framerate", type=int, default=20, help="At what FPS we should store experiences (default: 20)") parser.add_argument("-q", "--quality", type=int, default=80, help="JPEG compression quality (default: 80)") parser.add_argument("process_name", type=str, help="Name of process to be recorded.") parser.add_argument("env_name", type=str, help="Name to be used when storing samples.") parser.add_argument("output", type=str, help="Root directory for saved recordings.")
MIT License
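A sketch of calling finish_recording above; the trajectory payload and identifiers are made up, and the target directory tree must already exist because the function only writes the JSON file.

from record_human_play import finish_recording

data = {"steps": [{"buttons": ["W"], "mouse": [0, 0]}]}   # hypothetical payload shape
finish_recording("recordings", "doom", "2021-01-01_12-00-00", data)
# writes recordings/trajectories_pressed_buttons/doom/2021-01-01_12-00-00.json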
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/admissionregistration_v1beta1_webhook_client_config.py
AdmissionregistrationV1beta1WebhookClientConfig.ca_bundle
python
def ca_bundle(self, ca_bundle): if (self.local_vars_configuration.client_side_validation and ca_bundle is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle)): raise ValueError(r"Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._ca_bundle = ca_bundle
Sets the ca_bundle of this AdmissionregistrationV1beta1WebhookClientConfig. `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. # noqa: E501 :param ca_bundle: The ca_bundle of this AdmissionregistrationV1beta1WebhookClientConfig. # noqa: E501 :type: str
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/admissionregistration_v1beta1_webhook_client_config.py#L77-L89
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class AdmissionregistrationV1beta1WebhookClientConfig(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'ca_bundle': 'str', 'service': 'AdmissionregistrationV1beta1ServiceReference', 'url': 'str' } attribute_map = { 'ca_bundle': 'caBundle', 'service': 'service', 'url': 'url' } def __init__(self, ca_bundle=None, service=None, url=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._ca_bundle = None self._service = None self._url = None self.discriminator = None if ca_bundle is not None: self.ca_bundle = ca_bundle if service is not None: self.service = service if url is not None: self.url = url @property def ca_bundle(self): return self._ca_bundle @ca_bundle.setter
Apache License 2.0
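A sketch of setting ca_bundle on the model above; the value must be base64 text or the client-side validation regex in the setter rejects it. The certificate bytes and URL are placeholders.

import base64
from kubernetes_asyncio.client.models.admissionregistration_v1beta1_webhook_client_config import (
    AdmissionregistrationV1beta1WebhookClientConfig,
)

pem = b"-----BEGIN CERTIFICATE-----\n<placeholder>\n-----END CERTIFICATE-----\n"
cfg = AdmissionregistrationV1beta1WebhookClientConfig(
    ca_bundle=base64.b64encode(pem).decode("ascii"),
    url="https://webhook.example.com/validate",
)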
hbdmapi/huobi_futures_python
alpha/quant.py
Quant.initialize
python
def initialize(self, config_module=None): self._get_event_loop() self._load_settings(config_module) self._init_logger() self._init_db_instance() self._get_version() self._do_heartbeat()
Initialize. Args: config_module: config file path, normally it's a JSON file.
https://github.com/hbdmapi/huobi_futures_python/blob/a505cfef0591d4adc610b7ef11bd06cb2d2ae2a5/alpha/quant.py#L29-L40
import signal import asyncio from alpha.utils import logger from alpha.config import config from alpha.const import VERSION class Quant: def __init__(self): self.loop = None
MIT License
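A minimal sketch of bootstrapping the Quant object above; the config path is illustrative.

from alpha.quant import Quant

quant = Quant()
quant.initialize("config.json")   # loads settings, logger, DB instance and starts the heartbeat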
hopshadoop/hdfscontents
hdfscontents/hdfsio.py
HDFSManagerMixin._hdfs_ensure_dir_exists
python
def _hdfs_ensure_dir_exists(self, hdfs_path): if not self.hdfs.exists(hdfs_path): try: self.hdfs.create_directory(hdfs_path) self.hdfs.chmod(hdfs_path, 0o0770) except OSError as e: if e.errno != errno.EEXIST: raise elif not self._hdfs_dir_exists(hdfs_path): raise IOError("%r exists but is not a directory" % hdfs_path)
Ensure that a directory exists. If it doesn't exist, try to create it and protect against a race condition if another process is doing the same.
https://github.com/hopshadoop/hdfscontents/blob/1eafd6260f2edca0ec9093196167d2233fdecfb2/hdfscontents/hdfsio.py#L174-L189
from contextlib import contextmanager import errno import os from tornado.web import HTTPError from notebook.utils import ( to_api_path, to_os_path, ) import nbformat from pydoop.hdfs.path import split from ipython_genutils.py3compat import str_to_unicode from traitlets.config import Configurable from traitlets import Bool, Integer, Unicode, default, Instance try: from base64 import encodebytes, decodebytes except ImportError: from base64 import encodestring as encodebytes, decodestring as decodebytes def path_to_intermediate(path): dirname, basename = os.path.split(path) return os.path.join(dirname, '.~' + basename) def path_to_invalid(path): dirname, basename = os.path.split(path) return os.path.join(dirname, basename + '.invalid') def hdfs_copy_file(hdfs, src, dst): chunk = 2 ** 16 with hdfs.open_file(dst, 'w') as f1: with hdfs.open_file(src, 'r') as f2: while True: out = f2.read(chunk) if len(out) == 0: break f1.write(out) hdfs.chmod(dst, 0o0770) def hdfs_replace_file(hdfs, src, dst): hdfs.delete(dst) hdfs.move(src, hdfs, dst) hdfs.chmod(dst, 0o0770) def hdfs_file_exists(hdfs, hdfs_path): return hdfs.exists(hdfs_path) and hdfs.get_path_info(hdfs_path).get(u'kind') == u'file' @contextmanager def atomic_writing(hdfs, hdfs_path): tmp_path = path_to_intermediate(hdfs_path) if hdfs_file_exists(hdfs, hdfs_path): hdfs_copy_file(hdfs, hdfs_path, tmp_path) fileobj = hdfs.open_file(hdfs_path, 'w') try: yield fileobj except: fileobj.close() hdfs_replace_file(hdfs, tmp_path, hdfs_path) raise fileobj.flush() fileobj.close() hdfs.chmod(hdfs_path, 0o0770) if hdfs_file_exists(hdfs, tmp_path): hdfs.delete(tmp_path) @contextmanager def _simple_writing(hdfs, hdfs_path): fileobj = hdfs.open_file(hdfs_path, 'w') try: yield fileobj except: fileobj.close() raise fileobj.flush() fileobj.close() hdfs.chmod(hdfs_path, 0o0770) class HDFSManagerMixin(Configurable): use_atomic_writing = Bool(True, config=True, help= """By default notebooks are saved on disk on a temporary file and then if succefully written, it replaces the old ones. This procedure, namely 'atomic_writing', causes some bugs on file system whitout operation order enforcement (like some networked fs). If set to False, the new notebook is written directly on the old one which could fail (eg: full filesystem or quota )""") def _hdfs_dir_exists(self, hdfs_path): if self.hdfs.exists(hdfs_path): return self.hdfs.get_path_info(hdfs_path).get(u'kind') == u'directory' else: return False
Apache License 2.0
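A local-filesystem analogue of the race-safe directory creation shown above, so the pattern can be tried without an HDFS cluster; the helper name is hypothetical:

import errno
import os

def ensure_dir_exists(path, mode=0o770):
    # Create the directory, tolerating the race where another process creates it first.
    try:
        os.makedirs(path, mode)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    if not os.path.isdir(path):
        raise IOError("%r exists but is not a directory" % path)

ensure_dir_exists("/tmp/example-notebooks")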
fusionauth/fusionauth-python-client
src/main/python/fusionauth/fusionauth_client.py
FusionAuthClient.create_user_consent
python
def create_user_consent(self, request, user_consent_id=None): return self.start().uri('/api/user/consent') .url_segment(user_consent_id) .body_handler(JSONBodyHandler(request)) .post() .go()
Creates a single User consent. Attributes: user_consent_id: (Optional) The Id for the User consent. If not provided a secure random UUID will be generated. request: The request that contains the user consent information.
https://github.com/fusionauth/fusionauth-python-client/blob/20bf313710eb0af6bfb9c07b7864b52fe5853eb0/src/main/python/fusionauth/fusionauth_client.py#L497-L509
from deprecated import deprecated from fusionauth.rest_client import RESTClient, JSONBodyHandler, FormDataBodyHandler class FusionAuthClient: def __init__(self, api_key, base_url): self.api_key = api_key self.base_url = base_url self.tenant_id = None def set_tenant_id(self, tenant_id): self.tenant_id = tenant_id def action_user(self, request): return self.start().uri('/api/user/action') .body_handler(JSONBodyHandler(request)) .post() .go() def activate_reactor(self, request): return self.start().uri('/api/reactor') .body_handler(JSONBodyHandler(request)) .post() .go() def add_user_to_family(self, family_id, request): return self.start().uri('/api/user/family') .url_segment(family_id) .body_handler(JSONBodyHandler(request)) .put() .go() def cancel_action(self, action_id, request): return self.start().uri('/api/user/action') .url_segment(action_id) .body_handler(JSONBodyHandler(request)) .delete() .go() def change_password(self, change_password_id, request): return self.start_anonymous().uri('/api/user/change-password') .url_segment(change_password_id) .body_handler(JSONBodyHandler(request)) .post() .go() def change_password_by_identity(self, request): return self.start().uri('/api/user/change-password') .body_handler(JSONBodyHandler(request)) .post() .go() def comment_on_user(self, request): return self.start().uri('/api/user/comment') .body_handler(JSONBodyHandler(request)) .post() .go() def create_api_key(self, request, key_id=None): return self.start().uri('/api/api-key') .url_segment(key_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_application(self, request, application_id=None): return self.start().uri('/api/application') .url_segment(application_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_application_role(self, application_id, request, role_id=None): return self.start().uri('/api/application') .url_segment(application_id) .url_segment("role") .url_segment(role_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_audit_log(self, request): return self.start().uri('/api/system/audit-log') .body_handler(JSONBodyHandler(request)) .post() .go() def create_connector(self, request, connector_id=None): return self.start().uri('/api/connector') .url_segment(connector_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_consent(self, request, consent_id=None): return self.start().uri('/api/consent') .url_segment(consent_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_email_template(self, request, email_template_id=None): return self.start().uri('/api/email/template') .url_segment(email_template_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_entity(self, request, entity_id=None): return self.start().uri('/api/entity') .url_segment(entity_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_entity_type(self, request, entity_type_id=None): return self.start().uri('/api/entity/type') .url_segment(entity_type_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_entity_type_permission(self, entity_type_id, request, permission_id=None): return self.start().uri('/api/entity/type') .url_segment(entity_type_id) .url_segment("permission") .url_segment(permission_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_family(self, request, family_id=None): return self.start().uri('/api/user/family') .url_segment(family_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_form(self, request, form_id=None): return 
self.start().uri('/api/form') .url_segment(form_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_form_field(self, request, field_id=None): return self.start().uri('/api/form/field') .url_segment(field_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_group(self, request, group_id=None): return self.start().uri('/api/group') .url_segment(group_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_group_members(self, request): return self.start().uri('/api/group/member') .body_handler(JSONBodyHandler(request)) .post() .go() def create_ip_access_control_list(self, request, access_control_list_id=None): return self.start().uri('/api/ip-acl') .url_segment(access_control_list_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_identity_provider(self, request, identity_provider_id=None): return self.start().uri('/api/identity-provider') .url_segment(identity_provider_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_lambda(self, request, lambda_id=None): return self.start().uri('/api/lambda') .url_segment(lambda_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_message_template(self, request, message_template_id=None): return self.start().uri('/api/message/template') .url_segment(message_template_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_messenger(self, request, messenger_id=None): return self.start().uri('/api/messenger') .url_segment(messenger_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_tenant(self, request, tenant_id=None): return self.start().uri('/api/tenant') .url_segment(tenant_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_theme(self, request, theme_id=None): return self.start().uri('/api/theme') .url_segment(theme_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_user(self, request, user_id=None): return self.start().uri('/api/user') .url_segment(user_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_user_action(self, request, user_action_id=None): return self.start().uri('/api/user-action') .url_segment(user_action_id) .body_handler(JSONBodyHandler(request)) .post() .go() def create_user_action_reason(self, request, user_action_reason_id=None): return self.start().uri('/api/user-action-reason') .url_segment(user_action_reason_id) .body_handler(JSONBodyHandler(request)) .post() .go()
Apache License 2.0
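A usage sketch for create_user_consent(). The API key, base URL and Ids are placeholders, and the request body shape is an assumption based on the FusionAuth User Consent API rather than something stated in the record:

from fusionauth.fusionauth_client import FusionAuthClient

client = FusionAuthClient('api-key-placeholder', 'http://localhost:9011')
request = {
    'userConsent': {
        'consentId': '00000000-0000-0000-0000-000000000001',  # placeholder
        'userId': '00000000-0000-0000-0000-000000000002',     # placeholder
        'status': 'Active',
    }
}
response = client.create_user_consent(request)
if response.was_successful():
    print(response.success_response)
else:
    print(response.error_response)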
stlehmann/pyads
pyads/constants.py
PLCTYPE_ARR_SHORT
python
def PLCTYPE_ARR_SHORT(n: int) -> Type[Array]: return c_int16 * n
Return an array with n short values.
https://github.com/stlehmann/pyads/blob/3c505092dafb2cd3f85c77ab6c700b99976cf5da/pyads/constants.py#L166-L168
from typing import Type, Dict, Callable, Union from ctypes import ( Array, c_bool, c_ubyte, c_int8, c_uint8, c_int16, c_uint16, c_int32, c_uint32, c_float, c_double, c_char, c_int64, c_uint64, ) STRING_BUFFER: int = 1024 PLC_DEFAULT_STRING_SIZE: int = 80 MAX_ADS_SUB_COMMANDS: int = 500 PLCTYPE_BOOL = c_bool PLCTYPE_BYTE = c_ubyte PLCTYPE_DWORD = c_uint32 PLCTYPE_DINT = c_int32 PLCTYPE_INT = c_int16 PLCTYPE_LREAL = c_double PLCTYPE_REAL = c_float PLCTYPE_SINT = c_int8 PLCTYPE_STRING = c_char PLCTYPE_TOD = c_int32 PLCTYPE_UBYTE = c_ubyte PLCTYPE_UDINT = c_uint32 PLCTYPE_UINT = c_uint16 PLCTYPE_USINT = c_uint8 PLCTYPE_WORD = c_uint16 PLCTYPE_LINT = c_int64 PLCTYPE_ULINT = c_uint64 PLCTYPE_DATE = PLCTYPE_DWORD PLCTYPE_DATE_AND_TIME = PLCTYPE_DWORD PLCTYPE_DT = PLCTYPE_DWORD PLCTYPE_TIME = PLCTYPE_DWORD PLCSimpleDataType = Union[ PLCTYPE_BOOL, PLCTYPE_BYTE, PLCTYPE_DWORD, PLCTYPE_DINT, PLCTYPE_INT, PLCTYPE_LREAL, PLCTYPE_REAL, PLCTYPE_SINT, PLCTYPE_STRING, PLCTYPE_TOD, PLCTYPE_UBYTE, PLCTYPE_UDINT, PLCTYPE_UINT, PLCTYPE_USINT, PLCTYPE_WORD, PLCTYPE_LINT, PLCTYPE_ULINT, PLCTYPE_DATE, PLCTYPE_DATE_AND_TIME, PLCTYPE_DT, PLCTYPE_TIME, ] PLCDataType = Union[Array, PLCSimpleDataType] DATATYPE_MAP: Dict[Type, str] = { PLCTYPE_BOOL: "<?", PLCTYPE_BYTE: "<B", PLCTYPE_DINT: "<i", PLCTYPE_DWORD: "<I", PLCTYPE_INT: "<h", PLCTYPE_LREAL: "<d", PLCTYPE_REAL: "<f", PLCTYPE_SINT: "<b", PLCTYPE_UDINT: "<I", PLCTYPE_UINT: "<H", PLCTYPE_USINT: "<B", PLCTYPE_LINT: "<q", PLCTYPE_ULINT: "<Q", PLCTYPE_WORD: "<H", } ADST_VOID: int = 0 ADST_INT8: int = 16 ADST_UINT8: int = 17 ADST_INT16: int = 2 ADST_UINT16: int = 18 ADST_INT32: int = 3 ADST_UINT32: int = 19 ADST_INT64: int = 20 ADST_UINT64: int = 21 ADST_REAL32: int = 4 ADST_REAL64: int = 5 ADST_BIGTYPE: int= 65 ADST_STRING: int = 30 ADST_WSTRING: int = 31 ADST_REAL80: int = 32 ADST_BIT: int = 33 ADST_MAXTYPES: int = 34 ads_type_to_ctype = { ADST_INT8: PLCTYPE_BYTE, ADST_UINT8: PLCTYPE_UBYTE, ADST_INT16: PLCTYPE_INT, ADST_UINT16: PLCTYPE_UINT, ADST_INT32: PLCTYPE_DINT, ADST_UINT32: PLCTYPE_UDINT, ADST_INT64: PLCTYPE_LINT, ADST_UINT64: PLCTYPE_ULINT, ADST_REAL32: PLCTYPE_REAL, ADST_REAL64: PLCTYPE_LREAL, ADST_STRING: PLCTYPE_STRING, ADST_BIT: PLCTYPE_BOOL, } def PLCTYPE_ARR_REAL(n: int) -> Type[Array]: return c_float * n def PLCTYPE_ARR_LREAL(n: int) -> Type[Array]: return c_double * n def PLCTYPE_ARR_BOOL(n: int) -> Type[Array]: return c_bool * n def PLCTYPE_ARR_INT(n: int) -> Type[Array]: return c_int16 * n def PLCTYPE_ARR_UINT(n: int) -> Type[Array]: return c_uint16 * n
MIT License
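A short example of what the factory above returns: a ctypes array type sized at call time.

from pyads.constants import PLCTYPE_ARR_SHORT

ArrType = PLCTYPE_ARR_SHORT(5)   # ctypes type equivalent to c_int16 * 5
buf = ArrType(1, 2, 3, 4, 5)
print(list(buf))                 # [1, 2, 3, 4, 5]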
marqeta/marqeta-python
marqeta/resources/commando_modes.py
CommandoModesCollection.__call__
python
def __call__(self, token): return CommandoModesContext(token, self.client)
Special case call made with token :param token: commandomodes token :return: CommandoModesContext object
https://github.com/marqeta/marqeta-python/blob/66fa690eb910825c510a391720b0fe717fac0234/marqeta/resources/commando_modes.py#L24-L30
from marqeta.resources.collection import Collection from marqeta.response_models.commando_mode_response import CommandoModeResponse from marqeta.response_models.commando_mode_transition_response import CommandoModeTransitionResponse class CommandoModesCollection(object): _endpoint = 'commandomodes' def __init__(self, client): self.client = client self.collections = Collection(self.client, CommandoModeResponse)
MIT License
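A usage sketch that assumes the package's top-level Client exposes the collection as client.commando_modes (an assumption about the wrapper, not stated in the record); the URL and tokens are placeholders:

from marqeta import Client

client = Client('https://sandbox-api.marqeta.com/v3/', 'application-token', 'access-token')
context = client.commando_modes('commando-mode-token-placeholder')  # invokes __call__ above
# 'context' is a CommandoModesContext scoped to that token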
simplejwt/django-rest-framework-simplejwt
rest_framework_simplejwt/tokens.py
BlacklistMixin.blacklist
python
def blacklist(self):
    jti = self.payload[api_settings.JTI_CLAIM]
    exp = self.payload['exp']

    token, _ = OutstandingToken.objects.get_or_create(
        jti=jti,
        defaults={
            'token': str(self),
            'expires_at': datetime_from_epoch(exp),
        },
    )

    return BlacklistedToken.objects.get_or_create(token=token)
Ensures this token is included in the outstanding token list and adds it to the blacklist.
https://github.com/simplejwt/django-rest-framework-simplejwt/blob/2003a24276f334c5e1d1b03c91d5343c0d3376bf/rest_framework_simplejwt/tokens.py#L218-L235
from datetime import timedelta from uuid import uuid4 from django.conf import settings from django.utils.translation import gettext_lazy as _ from django.utils.module_loading import import_string from .exceptions import TokenBackendError, TokenError from .settings import api_settings from .token_blacklist.models import BlacklistedToken, OutstandingToken from .utils import ( aware_utcnow, datetime_from_epoch, datetime_to_epoch, format_lazy, ) class Token: token_type = None lifetime = None def __init__(self, token=None, verify=True): if self.token_type is None or self.lifetime is None: raise TokenError(_('Cannot create token with no type or lifetime')) self.token = token self.current_time = aware_utcnow() if token is not None: token_backend = self.get_token_backend() try: self.payload = token_backend.decode(token, verify=verify) except TokenBackendError: raise TokenError(_('Token is invalid or expired')) if verify: self.verify() else: self.payload = {api_settings.TOKEN_TYPE_CLAIM: self.token_type} self.set_exp(from_time=self.current_time, lifetime=self.lifetime) self.set_iat(at_time=self.current_time) self.set_jti() def __repr__(self): return repr(self.payload) def __getitem__(self, key): return self.payload[key] def __setitem__(self, key, value): self.payload[key] = value def __delitem__(self, key): del self.payload[key] def __contains__(self, key): return key in self.payload def get(self, key, default=None): return self.payload.get(key, default) def __str__(self): return self.get_token_backend().encode(self.payload) def verify(self): self.check_exp() if api_settings.JTI_CLAIM not in self.payload: raise TokenError(_('Token has no id')) self.verify_token_type() def verify_token_type(self): try: token_type = self.payload[api_settings.TOKEN_TYPE_CLAIM] except KeyError: raise TokenError(_('Token has no type')) if self.token_type != token_type: raise TokenError(_('Token has wrong type')) def set_jti(self): self.payload[api_settings.JTI_CLAIM] = uuid4().hex def set_exp(self, claim='exp', from_time=None, lifetime=None): if from_time is None: from_time = self.current_time if lifetime is None: lifetime = self.lifetime self.payload[claim] = datetime_to_epoch(from_time + lifetime) def set_iat(self, claim='iat', at_time=None): if at_time is None: at_time = self.current_time self.payload[claim] = datetime_to_epoch(at_time) def check_exp(self, claim='exp', current_time=None): if current_time is None: current_time = self.current_time try: claim_value = self.payload[claim] except KeyError: raise TokenError(format_lazy(_("Token has no '{}' claim"), claim)) claim_time = datetime_from_epoch(claim_value) if claim_time <= current_time: raise TokenError(format_lazy(_("Token '{}' claim has expired"), claim)) @classmethod def for_user(cls, user): user_id = getattr(user, api_settings.USER_ID_FIELD) if not isinstance(user_id, int): user_id = str(user_id) token = cls() token[api_settings.USER_ID_CLAIM] = user_id return token _token_backend = None def get_token_backend(self): if self._token_backend is None: self._token_backend = import_string( "rest_framework_simplejwt.state.token_backend" ) return self._token_backend class BlacklistMixin: if 'rest_framework_simplejwt.token_blacklist' in settings.INSTALLED_APPS: def verify(self, *args, **kwargs): self.check_blacklist() super().verify(*args, **kwargs) def check_blacklist(self): jti = self.payload[api_settings.JTI_CLAIM] if BlacklistedToken.objects.filter(token__jti=jti).exists(): raise TokenError(_('Token is blacklisted'))
MIT License
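A sketch of the typical logout flow that uses blacklist(); it assumes rest_framework_simplejwt.token_blacklist is in INSTALLED_APPS and that the client posts its refresh token:

from rest_framework_simplejwt.tokens import RefreshToken

def logout(request):
    # 'request' is a DRF request whose body carries the refresh token.
    token = RefreshToken(request.data['refresh'])
    token.blacklist()   # records the token as outstanding and blacklists it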
rbuffat/pyepw
pyepw/epw.py
DesignCondition.unkown_field
python
def unkown_field(self): return self._unkown_field
Get unkown_field. Returns: str: the value of `unkown_field` or None if not set
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L945-L952
from collections import OrderedDict import re class Location(object): _internal_name = "LOCATION" field_count = 9 def __init__(self): self._city = None self._state_province_region = None self._country = None self._source = None self._wmo = None self._latitude = None self._longitude = None self._timezone = None self._elevation = None def read(self, vals): i = 0 if len(vals[i]) == 0: self.city = None else: self.city = vals[i] i += 1 if len(vals[i]) == 0: self.state_province_region = None else: self.state_province_region = vals[i] i += 1 if len(vals[i]) == 0: self.country = None else: self.country = vals[i] i += 1 if len(vals[i]) == 0: self.source = None else: self.source = vals[i] i += 1 if len(vals[i]) == 0: self.wmo = None else: self.wmo = vals[i] i += 1 if len(vals[i]) == 0: self.latitude = None else: self.latitude = vals[i] i += 1 if len(vals[i]) == 0: self.longitude = None else: self.longitude = vals[i] i += 1 if len(vals[i]) == 0: self.timezone = None else: self.timezone = vals[i] i += 1 if len(vals[i]) == 0: self.elevation = None else: self.elevation = vals[i] i += 1 @property def city(self): return self._city @city.setter def city(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `city`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `city`') self._city = value @property def state_province_region(self): return self._state_province_region @state_province_region.setter def state_province_region(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `state_province_region`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `state_province_region`') self._state_province_region = value @property def country(self): return self._country @country.setter def country(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `country`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `country`') self._country = value @property def source(self): return self._source @source.setter def source(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `source`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `source`') self._source = value @property def wmo(self): return self._wmo @wmo.setter def wmo(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `wmo`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `wmo`') self._wmo = value @property def latitude(self): return self._latitude @latitude.setter def latitude(self, value=0.0): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `latitude`'.format(value)) if value < -90.0: raise ValueError('value need to be greater or equal -90.0 ' 'for field `latitude`') if value > 90.0: raise ValueError('value need to be smaller 90.0 ' 'for field `latitude`') self._latitude = value @property def longitude(self): return self._longitude @longitude.setter def longitude(self, value=0.0): if value is 
not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `longitude`'.format(value)) if value < -180.0: raise ValueError('value need to be greater or equal -180.0 ' 'for field `longitude`') if value > 180.0: raise ValueError('value need to be smaller 180.0 ' 'for field `longitude`') self._longitude = value @property def timezone(self): return self._timezone @timezone.setter def timezone(self, value=0.0): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `timezone`'.format(value)) if value < -12.0: raise ValueError('value need to be greater or equal -12.0 ' 'for field `timezone`') if value > 12.0: raise ValueError('value need to be smaller 12.0 ' 'for field `timezone`') self._timezone = value @property def elevation(self): return self._elevation @elevation.setter def elevation(self, value=0.0): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `elevation`'.format(value)) if value < -1000.0: raise ValueError('value need to be greater or equal -1000.0 ' 'for field `elevation`') if value >= 9999.9: raise ValueError('value need to be smaller 9999.9 ' 'for field `elevation`') self._elevation = value @classmethod def _to_str(cls, value): if value is None: return '' else: return str(value) def export(self, top=True): out = [] if top: out.append(self._internal_name) out.append(self._to_str(self.city)) out.append(self._to_str(self.state_province_region)) out.append(self._to_str(self.country)) out.append(self._to_str(self.source)) out.append(self._to_str(self.wmo)) out.append(self._to_str(self.latitude)) out.append(self._to_str(self.longitude)) out.append(self._to_str(self.timezone)) out.append(self._to_str(self.elevation)) return ",".join(out) def __str__(self): return self.export(True) class DesignCondition(object): _internal_name = "DESIGN CONDITION" field_count = 68 def __init__(self): self._title_of_design_condition = None self._unkown_field = None self._design_stat_heating = None self._coldestmonth = None self._db996 = None self._db990 = None self._dp996 = None self._hr_dp996 = None self._db_dp996 = None self._dp990 = None self._hr_dp990 = None self._db_dp990 = None self._ws004c = None self._db_ws004c = None self._ws010c = None self._db_ws010c = None self._ws_db996 = None self._wd_db996 = None self._design_stat_cooling = None self._hottestmonth = None self._dbr = None self._db004 = None self._wb_db004 = None self._db010 = None self._wb_db010 = None self._db020 = None self._wb_db020 = None self._wb004 = None self._db_wb004 = None self._wb010 = None self._db_wb010 = None self._wb020 = None self._db_wb020 = None self._ws_db004 = None self._wd_db004 = None self._dp004 = None self._hr_dp004 = None self._db_dp004 = None self._dp010 = None self._hr_dp010 = None self._db_dp010 = None self._dp020 = None self._hr_dp020 = None self._db_dp020 = None self._en004 = None self._db_en004 = None self._en010 = None self._db_en010 = None self._en020 = None self._db_en020 = None self._hrs_84_and_db12_8_or_20_6 = None self._design_stat_extremes = None self._ws010 = None self._ws025 = None self._ws050 = None self._wbmax = None self._dbmin_mean = None self._dbmax_mean = None self._dbmin_stddev = None self._dbmax_stddev = None self._dbmin05years = None self._dbmax05years = None self._dbmin10years = None self._dbmax10years = None self._dbmin20years = None self._dbmax20years = None 
self._dbmin50years = None self._dbmax50years = None def read(self, vals): i = 0 if len(vals[i]) == 0: self.title_of_design_condition = None else: self.title_of_design_condition = vals[i] i += 1 if len(vals[i]) == 0: self.unkown_field = None else: self.unkown_field = vals[i] i += 1 if len(vals[i]) == 0: self.design_stat_heating = None else: self.design_stat_heating = vals[i] i += 1 if len(vals[i]) == 0: self.coldestmonth = None else: self.coldestmonth = vals[i] i += 1 if len(vals[i]) == 0: self.db996 = None else: self.db996 = vals[i] i += 1 if len(vals[i]) == 0: self.db990 = None else: self.db990 = vals[i] i += 1 if len(vals[i]) == 0: self.dp996 = None else: self.dp996 = vals[i] i += 1 if len(vals[i]) == 0: self.hr_dp996 = None else: self.hr_dp996 = vals[i] i += 1 if len(vals[i]) == 0: self.db_dp996 = None else: self.db_dp996 = vals[i] i += 1 if len(vals[i]) == 0: self.dp990 = None else: self.dp990 = vals[i] i += 1 if len(vals[i]) == 0: self.hr_dp990 = None else: self.hr_dp990 = vals[i] i += 1 if len(vals[i]) == 0: self.db_dp990 = None else: self.db_dp990 = vals[i] i += 1 if len(vals[i]) == 0: self.ws004c = None else: self.ws004c = vals[i] i += 1 if len(vals[i]) == 0: self.db_ws004c = None else: self.db_ws004c = vals[i] i += 1 if len(vals[i]) == 0: self.ws010c = None else: self.ws010c = vals[i] i += 1 if len(vals[i]) == 0: self.db_ws010c = None else: self.db_ws010c = vals[i] i += 1 if len(vals[i]) == 0: self.ws_db996 = None else: self.ws_db996 = vals[i] i += 1 if len(vals[i]) == 0: self.wd_db996 = None else: self.wd_db996 = vals[i] i += 1 if len(vals[i]) == 0: self.design_stat_cooling = None else: self.design_stat_cooling = vals[i] i += 1 if len(vals[i]) == 0: self.hottestmonth = None else: self.hottestmonth = vals[i] i += 1 if len(vals[i]) == 0: self.dbr = None else: self.dbr = vals[i] i += 1 if len(vals[i]) == 0: self.db004 = None else: self.db004 = vals[i] i += 1 if len(vals[i]) == 0: self.wb_db004 = None else: self.wb_db004 = vals[i] i += 1 if len(vals[i]) == 0: self.db010 = None else: self.db010 = vals[i] i += 1 if len(vals[i]) == 0: self.wb_db010 = None else: self.wb_db010 = vals[i] i += 1 if len(vals[i]) == 0: self.db020 = None else: self.db020 = vals[i] i += 1 if len(vals[i]) == 0: self.wb_db020 = None else: self.wb_db020 = vals[i] i += 1 if len(vals[i]) == 0: self.wb004 = None else: self.wb004 = vals[i] i += 1 if len(vals[i]) == 0: self.db_wb004 = None else: self.db_wb004 = vals[i] i += 1 if len(vals[i]) == 0: self.wb010 = None else: self.wb010 = vals[i] i += 1 if len(vals[i]) == 0: self.db_wb010 = None else: self.db_wb010 = vals[i] i += 1 if len(vals[i]) == 0: self.wb020 = None else: self.wb020 = vals[i] i += 1 if len(vals[i]) == 0: self.db_wb020 = None else: self.db_wb020 = vals[i] i += 1 if len(vals[i]) == 0: self.ws_db004 = None else: self.ws_db004 = vals[i] i += 1 if len(vals[i]) == 0: self.wd_db004 = None else: self.wd_db004 = vals[i] i += 1 if len(vals[i]) == 0: self.dp004 = None else: self.dp004 = vals[i] i += 1 if len(vals[i]) == 0: self.hr_dp004 = None else: self.hr_dp004 = vals[i] i += 1 if len(vals[i]) == 0: self.db_dp004 = None else: self.db_dp004 = vals[i] i += 1 if len(vals[i]) == 0: self.dp010 = None else: self.dp010 = vals[i] i += 1 if len(vals[i]) == 0: self.hr_dp010 = None else: self.hr_dp010 = vals[i] i += 1 if len(vals[i]) == 0: self.db_dp010 = None else: self.db_dp010 = vals[i] i += 1 if len(vals[i]) == 0: self.dp020 = None else: self.dp020 = vals[i] i += 1 if len(vals[i]) == 0: self.hr_dp020 = None else: self.hr_dp020 = vals[i] i += 1 if len(vals[i]) == 0: 
self.db_dp020 = None else: self.db_dp020 = vals[i] i += 1 if len(vals[i]) == 0: self.en004 = None else: self.en004 = vals[i] i += 1 if len(vals[i]) == 0: self.db_en004 = None else: self.db_en004 = vals[i] i += 1 if len(vals[i]) == 0: self.en010 = None else: self.en010 = vals[i] i += 1 if len(vals[i]) == 0: self.db_en010 = None else: self.db_en010 = vals[i] i += 1 if len(vals[i]) == 0: self.en020 = None else: self.en020 = vals[i] i += 1 if len(vals[i]) == 0: self.db_en020 = None else: self.db_en020 = vals[i] i += 1 if len(vals[i]) == 0: self.hrs_84_and_db12_8_or_20_6 = None else: self.hrs_84_and_db12_8_or_20_6 = vals[i] i += 1 if len(vals[i]) == 0: self.design_stat_extremes = None else: self.design_stat_extremes = vals[i] i += 1 if len(vals[i]) == 0: self.ws010 = None else: self.ws010 = vals[i] i += 1 if len(vals[i]) == 0: self.ws025 = None else: self.ws025 = vals[i] i += 1 if len(vals[i]) == 0: self.ws050 = None else: self.ws050 = vals[i] i += 1 if len(vals[i]) == 0: self.wbmax = None else: self.wbmax = vals[i] i += 1 if len(vals[i]) == 0: self.dbmin_mean = None else: self.dbmin_mean = vals[i] i += 1 if len(vals[i]) == 0: self.dbmax_mean = None else: self.dbmax_mean = vals[i] i += 1 if len(vals[i]) == 0: self.dbmin_stddev = None else: self.dbmin_stddev = vals[i] i += 1 if len(vals[i]) == 0: self.dbmax_stddev = None else: self.dbmax_stddev = vals[i] i += 1 if len(vals[i]) == 0: self.dbmin05years = None else: self.dbmin05years = vals[i] i += 1 if len(vals[i]) == 0: self.dbmax05years = None else: self.dbmax05years = vals[i] i += 1 if len(vals[i]) == 0: self.dbmin10years = None else: self.dbmin10years = vals[i] i += 1 if len(vals[i]) == 0: self.dbmax10years = None else: self.dbmax10years = vals[i] i += 1 if len(vals[i]) == 0: self.dbmin20years = None else: self.dbmin20years = vals[i] i += 1 if len(vals[i]) == 0: self.dbmax20years = None else: self.dbmax20years = vals[i] i += 1 if len(vals[i]) == 0: self.dbmin50years = None else: self.dbmin50years = vals[i] i += 1 if len(vals[i]) == 0: self.dbmax50years = None else: self.dbmax50years = vals[i] i += 1 @property def title_of_design_condition(self): return self._title_of_design_condition @title_of_design_condition.setter def title_of_design_condition(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `title_of_design_condition`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `title_of_design_condition`') self._title_of_design_condition = value @property
Apache License 2.0
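A small sketch showing the property above on a freshly constructed DesignCondition; populating it normally happens through read() with the raw design-condition fields of an EPW header:

from pyepw.epw import DesignCondition

dc = DesignCondition()
print(dc.unkown_field)   # None until read() has filled the field
# dc.read(raw_values)    # raw_values: list of the 68 design-condition strings (placeholder)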
vagrawal/deepsphinx
deepsphinx/attention.py
BahdanauAttentionCutoff.__init__
python
def __init__(self, num_units, memory, memory_sequence_length=None, normalize=False,
             score_mask_value=float('-inf'), name='BahdanauAttention'):
    def probability_fn_cutoff(scores, previous_alignments):
        ran = tf.range(tf.to_float(tf.shape(previous_alignments)[1]), dtype=tf.float32)
        mean = (tf.reduce_sum(ran * previous_alignments, axis=1) /
                tf.reduce_sum(previous_alignments, axis=1))
        mask = tf.logical_and(ran > mean - FLAGS.cutoff_range,
                              ran < mean + FLAGS.cutoff_range)
        return tf.nn.softmax(tf.where(mask, scores, tf.ones_like(scores) * -1000))

    probability_fn = tf.nn.softmax

    def probability_fn_cutoff(score, _):
        return probability_fn(score)

    super(BahdanauAttentionCutoff, self).__init__(
        query_layer=Dense(num_units, name='query_layer', use_bias=False),
        memory_layer=Dense(num_units, name='memory_layer', use_bias=False),
        memory=memory,
        probability_fn=probability_fn_cutoff,
        memory_sequence_length=memory_sequence_length,
        score_mask_value=score_mask_value,
        name=name)
    self._num_units = num_units
    self._normalize = normalize
    self._name = name
    dtype = tf.float32
    self.v = tf.get_variable('attention_v', [self._num_units], dtype=dtype)
    if FLAGS.use_conv_feat_att:
        self.conv_filt = tf.get_variable('conv_filter', shape=[200, 1, num_units])
    if self._normalize:
        self.g = tf.get_variable('attention_g', dtype=dtype,
                                 initializer=tf.sqrt((1. / self._num_units)))
        self.b = tf.get_variable('attention_b', [self._num_units], dtype=dtype,
                                 initializer=tf.zeros_initializer())
Construct the Attention mechanism. Args: num_units: The depth of the query mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. normalize: Python boolean. Whether to normalize the energy term. probability_fn: (optional) A `callable`. Converts the score to probabilities. The default is @{tf.nn.softmax}. Other options include @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}. Its signature should be: `probabilities = probability_fn(score)`. score_mask_value: (optional): The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. name: Name to use when creating ops.
https://github.com/vagrawal/deepsphinx/blob/5fa7a2e3f22a69d956cc4866a40f73fcdecb14e2/deepsphinx/attention.py#L25-L94
import tensorflow as tf from tensorflow.python.layers.core import Dense from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors from deepsphinx.utils import FLAGS class BahdanauAttentionCutoff(tf.contrib.seq2seq.BahdanauAttention.__base__):
MIT License
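A TF 1.x graph-mode sketch of plugging the mechanism into an attention wrapper. The shapes are illustrative, and it assumes deepsphinx's FLAGS (e.g. use_conv_feat_att, cutoff_range) have already been populated, since the constructor reads them:

import tensorflow as tf
from deepsphinx.attention import BahdanauAttentionCutoff

encoder_outputs = tf.placeholder(tf.float32, [None, None, 256])   # [batch, time, features]
encoder_lengths = tf.placeholder(tf.int32, [None])

attention = BahdanauAttentionCutoff(
    num_units=256,
    memory=encoder_outputs,
    memory_sequence_length=encoder_lengths)
decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
    tf.contrib.rnn.LSTMCell(256), attention)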
santinic/pampy
pampy/pampy.py
match
python
def match(var, *args, default=NoDefault, strict=True):
    if len(args) % 2 != 0:
        raise MatchError("Every guard must have an action.")
    if default is NoDefault and strict is False:
        default = False

    pairs = list(pairwise(args))
    patterns = [patt for (patt, action) in pairs]

    for patt, action in pairs:
        matched_as_value, args = match_value(patt, var)
        if matched_as_value:
            lambda_args = args if len(args) > 0 else BoxedArgs(var)
            return run(action, lambda_args)

    if default is NoDefault:
        if _ not in patterns:
            raise MatchError("'_' not provided. This case is not handled:\n%s" % str(var))
    else:
        return default
Match `var` against a number of potential patterns. Example usage: ``` match(x, 3, "this matches the number 3", int, "matches any integer", (str, int), lambda a, b: "a tuple (a, b) you can use in a function", [1, 2, _], "any list of 3 elements that begins with [1, 2]", {'x': _}, "any dict with a key 'x' and any value associated", _, "anything else" ) ``` :param var: The variable to test patterns against. :param args: Alternating patterns and actions. There must be an action for every pattern specified. Patterns can take many forms, see README.md for examples. Actions can be either a literal value or a callable which will be called with the arguments that were matched in corresponding pattern. :param default: If `default` is specified then it will be returned if none of the patterns match. If `default` is unspecified then a `MatchError` will be thrown instead. :return: The result of the action which corresponds to the first matching pattern.
https://github.com/santinic/pampy/blob/665c6b88bca00a0b1a9a744ebd0764dcdecafab4/pampy/pampy.py#L260-L305
from collections.abc import ( Iterable, Mapping, Callable as ACallable, ) from itertools import zip_longest from enum import Enum from typing import ( Any, Generic, TypeVar, Tuple, List, Pattern as RegexPattern, Callable, ) import inspect from pampy.helpers import ( UnderscoreType, HeadType, TailType, get_lambda_args_error_msg, BoxedArgs, PaddedValue, NoDefault, is_typing_stuff, is_dataclass, is_generic, is_newtype, is_union, pairwise, peek, get_real_type, get_extra, ) T = TypeVar('T') _ = ANY = UnderscoreType() HEAD = HeadType() REST = TAIL = TailType() def run(action, var): if callable(action): if isinstance(var, Iterable): try: return action(*var) except TypeError as err: raise MatchError(get_lambda_args_error_msg(action, var, err)) elif isinstance(var, BoxedArgs): return action(var.get()) else: return action(var) else: return action def match_value(pattern, value) -> Tuple[bool, List]: if value is PaddedValue: return False, [] elif is_typing_stuff(pattern): return match_typing_stuff(pattern, value) elif isinstance(pattern, (int, float, str, bool, Enum)): eq = pattern == value type_eq = type(pattern) == type(value) return eq and type_eq, [] elif pattern is None: return value is None, [] elif isinstance(pattern, type): if isinstance(value, pattern): return True, [value] elif isinstance(pattern, (list, tuple)): return match_iterable(pattern, value) elif isinstance(pattern, dict): return match_dict(pattern, value) elif callable(pattern): return_value = pattern(value) if isinstance(return_value, bool): return return_value, [value] elif isinstance(return_value, tuple) and len(return_value) == 2 and isinstance(return_value[0], bool) and isinstance(return_value[1], list): return return_value else: raise MatchError("Warning! pattern function %s is not returning a boolean " "nor a tuple of (boolean, list), but instead %s" % (pattern, return_value)) elif isinstance(pattern, RegexPattern): rematch = pattern.search(value) if rematch is not None: return True, list(rematch.groups()) elif pattern is _: return True, [value] elif pattern is HEAD or pattern is TAIL: raise MatchError("HEAD or TAIL should only be used inside an Iterable (list or tuple).") elif is_dataclass(pattern) and pattern.__class__ == value.__class__: return match_dict(pattern.__dict__, value.__dict__) return False, [] def match_dict(pattern, value) -> Tuple[bool, List]: if not isinstance(value, dict) or not isinstance(pattern, dict): return False, [] total_extracted = [] still_usable_value_keys = set(value.keys()) still_usable_pattern_keys = set(pattern.keys()) for pkey, pval in pattern.items(): if pkey not in still_usable_pattern_keys: continue matched_left_and_right = False for vkey, vval in value.items(): if vkey not in still_usable_value_keys: continue if pkey not in still_usable_pattern_keys: continue key_matched, key_extracted = match_value(pkey, vkey) if key_matched: value_matched, value_extracted = match_value(pval, vval) if value_matched: total_extracted += key_extracted + value_extracted matched_left_and_right = True still_usable_pattern_keys.remove(pkey) still_usable_value_keys.remove(vkey) break if not matched_left_and_right: return False, [] return True, total_extracted def only_padded_values_follow(padded_pairs, i): i += 1 while i < len(padded_pairs): pattern, value = padded_pairs[i] if pattern is not PaddedValue: return False i += 1 return True def match_iterable(patterns, values) -> Tuple[bool, List]: if not isinstance(patterns, Iterable) or not isinstance(values, Iterable): return False, [] total_extracted = [] 
padded_pairs = list(zip_longest(patterns, values, fillvalue=PaddedValue)) for i, (pattern, value) in enumerate(padded_pairs): if pattern is HEAD: if i != 0: raise MatchError("HEAD can only be in first position of a pattern.") else: if value is PaddedValue: return False, [] else: total_extracted += [value] elif pattern is TAIL: if not only_padded_values_follow(padded_pairs, i): raise MatchError("TAIL must me in last position of the pattern.") else: tail = [value for (pattern, value) in padded_pairs[i:] if value is not PaddedValue] total_extracted.append(tail) break else: matched, extracted = match_value(pattern, value) if not matched: return False, [] else: total_extracted += extracted return True, total_extracted def match_typing_stuff(pattern, value) -> Tuple[bool, List]: if pattern == Any: return match_value(ANY, value) elif is_union(pattern): for subpattern in pattern.__args__: is_matched, extracted = match_value(subpattern, value) if is_matched: return True, extracted else: return False, [] elif is_newtype(pattern): return match_value(pattern.__supertype__, value) elif is_generic(pattern): return match_generic(pattern, value) else: return False, [] def match_generic(pattern: Generic[T], value) -> Tuple[bool, List]: if get_extra(pattern) == type: real_value = None if is_newtype(value): real_value = value value = get_real_type(value) if not inspect.isclass(value): return False, [] type_ = pattern.__args__[0] if type_ == Any: return True, [real_value or value] if is_newtype(type_): type_ = get_real_type(type_) if issubclass(value, type_): return True, [real_value or value] else: return False, [] elif get_extra(pattern) == ACallable: if callable(value): spec = inspect.getfullargspec(value) annotations = spec.annotations artgtypes = [annotations.get(arg, Any) for arg in spec.args] ret_type = annotations.get('return', Any) if pattern == Callable[[*artgtypes], ret_type]: return True, [value] else: return False, [] else: return False, [] elif get_extra(pattern) == tuple: return match_value(pattern.__args__, value) elif issubclass(get_extra(pattern), Mapping): type_matched, _captured = match_value(get_extra(pattern), value) if not type_matched: return False, [] k_type, v_type = pattern.__args__ key_example = peek(value) key_matched, _captured = match_value(k_type, key_example) if not key_matched: return False, [] value_matched, _captured = match_value(v_type, value[key_example]) if not value_matched: return False, [] else: return True, [value] elif issubclass(get_extra(pattern), Iterable): type_matched, _captured = match_value(get_extra(pattern), value) if not type_matched: return False, [] v_type, = pattern.__args__ v = peek(value) value_matched, _captured = match_value(v_type, v) if not value_matched: return False, [] else: return True, [value] else: return False, []
MIT License
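A runnable version of the docstring example above, kept to three patterns plus the catch-all:

from pampy import match, _

def describe(x):
    return match(x,
        3,         "the number three",
        int,       lambda n: "some other int: %d" % n,
        [1, 2, _], lambda last: "a 3-element list starting with 1, 2; last item %r" % last,
        _,         "anything else",
    )

print(describe(3))          # the number three
print(describe([1, 2, 9]))  # a 3-element list starting with 1, 2; last item 9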
pegase745/sublime-flowtype
flowtype/commands/add_pragma.py
FlowtypeAddPragma.is_enabled
python
def is_enabled(self):
    content = self.get_content()
    no_pragma = "// @flow" not in content and "/* @flow */" not in content
    return is_js_source(self.view) and no_pragma
Enable the command only on JavaScript files that do not already have a flow pragma.
https://github.com/pegase745/sublime-flowtype/blob/d1f95f22fb698029d09771dfe0959eb2d7f0c722/flowtype/commands/add_pragma.py#L11-L16
from ..logger import Logger from .base import BaseCommand from ..helpers import is_js_source logger = Logger() class FlowtypeAddPragma(BaseCommand):
MIT License
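The pragma check itself, pulled out as a standalone sketch so it can be exercised outside Sublime Text; the function name is hypothetical:

def needs_flow_pragma(content):
    return "// @flow" not in content and "/* @flow */" not in content

print(needs_flow_pragma("const x = 1;"))            # True: command stays enabled
print(needs_flow_pragma("// @flow\nconst x = 1;"))  # False: pragma already present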
openstack/tempest-lib
tempest_lib/services/compute/aggregates_client.py
AggregatesClient.show_aggregate
python
def show_aggregate(self, aggregate_id):
    resp, body = self.get("os-aggregates/%s" % aggregate_id)
    body = json.loads(body)
    self.validate_response(schema.get_aggregate, resp, body)
    return rest_client.ResponseBody(resp, body)
Get details of the given aggregate.
https://github.com/openstack/tempest-lib/blob/023426894a4f72d906ed6f79c55ed7152a732b44/tempest_lib/services/compute/aggregates_client.py#L32-L37
from oslo_serialization import jsonutils as json from tempest_lib.api_schema.response.compute.v2_1 import aggregates as schema from tempest_lib.common import rest_client from tempest_lib import exceptions as lib_exc class AggregatesClient(rest_client.RestClient): def list_aggregates(self): resp, body = self.get("os-aggregates") body = json.loads(body) self.validate_response(schema.list_aggregates, resp, body) return rest_client.ResponseBody(resp, body)
Apache License 2.0
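A usage sketch for show_aggregate(); 'auth_provider' stands for an already configured tempest_lib auth provider, and the service, region and aggregate id values are placeholders:

from tempest_lib.services.compute.aggregates_client import AggregatesClient

client = AggregatesClient(auth_provider, 'compute', 'RegionOne')
body = client.show_aggregate('1')
print(body['aggregate'])   # details of aggregate 1 as returned by Nova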
airesearch-in-th/kora
kora/kaggle.py
ls
python
def ls(dataset):
    cmd = 'kaggle datasets files -v ' + dataset
    return _show_csv(getoutput(cmd))
List all files for this dataset name
https://github.com/airesearch-in-th/kora/blob/dcf3cc4dec0caa91ffbee7e8942a57a433ab099f/kora/kaggle.py#L33-L36
import os import pandas as pd from io import StringIO from subprocess import getoutput from IPython import get_ipython import kora.data_table assert os.path.exists('/content/drive'), "You need to mount the drive first" assert os.path.exists('/content/drive/My Drive/kaggle.json'), "You need to create API token and store it as kaggle.json in your drive" os.makedirs('/root/.kaggle', exist_ok=True) os.system("cp 'drive/My Drive/kaggle.json' /root/.kaggle/") os.chmod('/root/.kaggle/kaggle.json', 0o600) def _show_csv(csv): buf = StringIO(csv) if csv.startswith('Warning:'): buf.readline() return pd.read_csv(buf) def search(query): cmd = 'kaggle datasets list -v -s '+query return _show_csv(getoutput(cmd))
MIT License
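A Colab usage sketch; it assumes Drive is mounted with a kaggle.json token (which kora.kaggle asserts on import), and the dataset slug is just an example:

from kora import kaggle

files = kaggle.ls("zillow/zecon")   # DataFrame listing the dataset's files
print(files.head())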
pyviz-dev/nbsite
examples/sites/holoviews/holoviews/streams.py
Stream.add_subscriber
python
def add_subscriber(self, subscriber, precedence=0):
    if not callable(subscriber):
        raise TypeError('Subscriber must be a callable.')
    self._subscribers.append((precedence, subscriber))
Register a callable subscriber to this stream which will be invoked either when event is called or when this stream is passed to the trigger classmethod. Precedence allows the subscriber ordering to be controlled. Users should only add subscribers with precedence between zero and one while HoloViews itself reserves the use of higher precedence values. Subscribers with high precedence are invoked later than ones with low precedence.
https://github.com/pyviz-dev/nbsite/blob/7a4752e6ed6a3b0c3698473a6dd3a71ff9ba2acb/examples/sites/holoviews/holoviews/streams.py#L232-L246
import uuid import math import param import numpy as np from numbers import Number from collections import defaultdict from .core import util from contextlib import contextmanager @contextmanager def triggering_streams(streams): for stream in streams: stream._triggering = True try: yield except: raise finally: for stream in streams: stream._triggering = False class Stream(param.Parameterized): registry = defaultdict(list) _callbacks = defaultdict(dict) @classmethod def define(cls, name, **kwargs): params = {'name':param.String(default=name)} for k,v in kwargs.items(): kws = dict(default=v, constant=True) if isinstance(v, param.Parameter): params[k] = v elif isinstance(v, bool): params[k] = param.Boolean(**kws) elif isinstance(v, int): params[k] = param.Integer(**kws) elif isinstance(v, float): params[k] = param.Number(**kws) elif isinstance(v,str): params[k] = param.String(**kws) elif isinstance(v,dict): params[k] = param.Dict(**kws) elif isinstance(v, tuple): params[k] = param.Tuple(**kws) elif isinstance(v,list): params[k] = param.List(**kws) elif isinstance(v,np.ndarray): params[k] = param.Array(**kws) else: params[k] = param.Parameter(**kws) return type(name, (Stream,), params) @classmethod def trigger(cls, streams): items = [stream.contents.items() for stream in streams] union = [kv for kvs in items for kv in kvs] klist = [k for k,_ in union] clashes = set([k for k in klist if klist.count(k) > 1]) if clashes: param.main.warning('Parameter name clashes for keys: %r' % clashes) subscriber_precedence = defaultdict(list) for stream in streams: for precedence, subscriber in stream._subscribers: subscriber_precedence[precedence].append(subscriber) sorted_subscribers = sorted(subscriber_precedence.items(), key=lambda x: x[0]) subscribers = util.unique_iterator([s for _, subscribers in sorted_subscribers for s in subscribers]) with triggering_streams(streams): for subscriber in subscribers: subscriber(**dict(union)) for stream in streams: with util.disable_constant(stream): if stream.transient: stream.reset() def __init__(self, rename={}, source=None, subscribers=[], linked=False, transient=False, **params): self._source = source self._subscribers = [] for subscriber in subscribers: self.add_subscriber(subscriber) self.linked = linked self._rename = self._validate_rename(rename) self.transient = transient self._triggering = False self._metadata = {} super(Stream, self).__init__(**params) if source is not None: self.registry[id(source)].append(self) @property def subscribers(self): return [s for p, s in sorted(self._subscribers, key=lambda x: x[0])] def clear(self, policy='all'): policies = ['all', 'user', 'internal'] if policy not in policies: raise ValueError('Policy for clearing subscribers must be one of %s' % policies) if policy == 'all': remaining = [] elif policy == 'user': remaining = [(p,s) for (p,s) in self._subscribers if p > 1] else: remaining = [(p,s) for (p,s) in self._subscribers if p <= 1] self._subscribers = remaining def reset(self): with util.disable_constant(self): for k, p in self.params().items(): if k != 'name': setattr(self, k, p.default)
BSD 3-Clause New or Revised License
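A sketch using Stream.define and Stream.trigger from the context above to show a subscriber being invoked; it assumes the defined stream's contents expose its parameter values, as in HoloViews:

from holoviews.streams import Stream

Count = Stream.define('Count', count=0)
stream = Count()

def report(count=None, **kwargs):
    print("count is", count)

stream.add_subscriber(report)   # precedence defaults to 0 (user range)
Stream.trigger([stream])        # invokes report(count=0)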
salesforce/pomgen
crawl/workspace.py
Workspace.filter_artifact_producing_packages
python
def filter_artifact_producing_packages(self, packages):
    art_defs = [self.parse_maven_artifact_def(p) for p in packages]
    return [art_def.bazel_package for art_def in art_defs
            if art_def.pom_generation_mode.produces_artifact]
Given a list of packages, returns those that are actually producing a Maven artifact. This is based on the pom_generation_mode specified in the BUILD.pom file.
https://github.com/salesforce/pomgen/blob/4fb427c95c9dc35bfcf47f921e85d6be3876ef6c/crawl/workspace.py#L108-L117
from common import logger from crawl import artifactprocessor from crawl import bazel from crawl import buildpom from crawl import dependency from crawl import dependencymd class Workspace: def __init__(self, repo_root_path, excluded_dependency_paths, source_exclusions, maven_install_info, pom_content, verbose=False): self.repo_root_path = repo_root_path self.excluded_dependency_paths = excluded_dependency_paths self.source_exclusions = source_exclusions self.pom_content = pom_content self.verbose = verbose self.dependency_metadata = dependencymd.DependencyMetadata() self._name_to_ext_deps = self._parse_maven_install(maven_install_info, repo_root_path) self._package_to_artifact_def = {} @property def name_to_external_dependencies(self): return self._name_to_ext_deps def parse_maven_artifact_def(self, package): if package in self._package_to_artifact_def: return self._package_to_artifact_def[package] art_def = buildpom.parse_maven_artifact_def(self.repo_root_path, package) if art_def is not None: art_def = artifactprocessor.augment_artifact_def(self.repo_root_path, art_def, self.source_exclusions) self._package_to_artifact_def[package] = art_def return art_def def parse_dep_labels(self, dep_labels): deps = [] for label in dep_labels: dep = self._parse_dep_label(label) if dep is not None: deps.append(dep) return deps def normalize_deps(self, artifact_def, deps): updated_deps = [] for dep in deps: if dep.bazel_package is not None and dep.bazel_package == artifact_def.bazel_package: if artifact_def.pom_generation_mode.produces_artifact: continue updated_deps.append(dep) return updated_deps
BSD 3-Clause New or Revised License
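A sketch of how the filter is used; 'ws' stands for an already constructed crawl.workspace.Workspace for the monorepo being processed, and the package paths are placeholders:

packages = ["projects/libs/payments", "projects/docs"]
artifact_packages = ws.filter_artifact_producing_packages(packages)
print(artifact_packages)   # only packages whose BUILD.pom pom_generation_mode produces an artifact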
lithium876/controll_remote_access_trojan
pyinstaller/PyInstaller/depend/dylib.py
mac_set_relative_dylib_deps
python
def mac_set_relative_dylib_deps(libname, distname):
    from PyInstaller.lib.macholib import util
    from PyInstaller.lib.macholib.MachO import MachO

    if os.path.basename(libname) in _BOOTLOADER_FNAMES:
        return

    parent_dir = ''
    if os.path.dirname(distname):
        parent_level = len(os.path.dirname(distname).split(os.sep))
        parent_dir = parent_level * (os.pardir + os.sep)

    def match_func(pth):
        if not util.in_system_path(pth):
            return os.path.join('@loader_path', parent_dir, os.path.basename(pth))

    dll = MachO(libname)
    dll.rewriteLoadCommands(match_func)

    try:
        f = open(dll.filename, 'rb+')
        for header in dll.headers:
            f.seek(0)
            dll.write(f)
        f.seek(0, 2)
        f.flush()
        f.close()
    except Exception:
        pass
On Mac OS X, set relative paths to the dynamic library dependencies of `libname`. Relative paths avoid the need for the DYLD_LIBRARY_PATH environment variable, which has some known issues; relative paths are a more flexible mechanism. The location of dependent libraries is derived from the location of the library itself (paths starting with '@loader_path'). 'distname' is the path of the library relative to the dist directory of the frozen executable; it is needed to determine how many directory levels to add to '@loader_path' for binaries not located in the dist directory itself. E.g. qt4 plugins are not in the same directory as the Qt*.dylib files; without using '@loader_path/../..' for qt plugins, Mac OS X would not be able to resolve their shared library dependencies, and the qt plugins would not be loaded.
https://github.com/lithium876/controll_remote_access_trojan/blob/7ba48b51d98723e0dd0bca7d0e2586d422f78419/pyinstaller/PyInstaller/depend/dylib.py#L177-L242
__all__ = ['exclude_list', 'include_list', 'include_library'] import os import re from PyInstaller import is_win, is_unix, is_aix, is_darwin from PyInstaller.compat import set import PyInstaller.log as logging logger = logging.getLogger('PyInstaller.build.dylib') _BOOTLOADER_FNAMES = set(['run', 'run_d', 'runw', 'runw_d']) _excludes = {} _includes = {} _win_excludes = { r'^Microsoft\.Windows\.Common-Controls$': 1, } _unix_excludes = { r'/libc\.so\..*': 1, r'/libdl\.so\..*': 1, r'/libm\.so\..*': 1, r'/libpthread\.so\..*': 1, r'/librt\.so\..*': 1, r'/libthread_db\.so\..*': 1, r'/libdb-.*\.so': 1, r'/ld-linux\.so\..*': 1, r'/libBrokenLocale\.so\..*': 1, r'/libanl\.so\..*': 1, r'/libcidn\.so\..*': 1, r'/libcrypt\.so\..*': 1, r'/libnsl\.so\..*': 1, r'/libnss_compat.*\.so\..*': 1, r'/libnss_dns.*\.so\..*': 1, r'/libnss_files.*\.so\..*': 1, r'/libnss_hesiod.*\.so\..*': 1, r'/libnss_nis.*\.so\..*': 1, r'/libnss_nisplus.*\.so\..*': 1, r'/libresolv\.so\..*': 1, r'/libutil\.so\..*': 1, r'/libGL\..*': 1, } _aix_excludes = { r'/libbz2\.a': 1, r'/libc\.a': 1, r'/libC\.a': 1, r'/libcrypt\.a': 1, r'/libdl\.a': 1, r'/libintl\.a': 1, r'/libpthreads\.a': 1, r'/librt\\.a': 1, r'/librtl\.a': 1, r'/libz\.a': 1, } if is_win: _excludes = _win_excludes from PyInstaller.utils import winutils sep = '[%s]' % re.escape(os.sep + os.altsep) windir = re.escape(winutils.get_windows_dir()) _excludes['^%s%s' % (windir, sep)] = 1 _includes[r'%spy(?:thon(?:com(?:loader)?)?|wintypes)\d+\.dll$' % sep] = 1 elif is_aix: _excludes = _aix_excludes elif is_unix: _excludes = _unix_excludes class ExcludeList(object): def __init__(self): self.regex = re.compile('|'.join(_excludes.keys()), re.I) def search(self, libname): if _excludes: return self.regex.search(libname) else: return False class IncludeList(object): def __init__(self): self.regex = re.compile('|'.join(_includes.keys()), re.I) def search(self, libname): if _includes: return self.regex.search(libname) else: return False exclude_list = ExcludeList() include_list = IncludeList() if is_darwin: from PyInstaller.lib.macholib import util class MacExcludeList(object): def search(self, libname): return util.in_system_path(libname) exclude_list = MacExcludeList() def include_library(libname): if exclude_list: if exclude_list.search(libname) and not include_list.search(libname): return False else: return True else: return True
Apache License 2.0
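A usage sketch with placeholder paths, matching the docstring's qt plugin example: libname is the bundled dylib on disk, distname its path relative to the dist directory:

from PyInstaller.depend.dylib import mac_set_relative_dylib_deps

mac_set_relative_dylib_deps(
    '/tmp/build/dist/myapp/qt4_plugins/imageformats/libqjpeg.dylib',
    'qt4_plugins/imageformats/libqjpeg.dylib')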
weasyl/weasyl
libweasyl/libweasyl/cache.py
ThreadCacheProxy.get_multi
python
def get_multi(self, keys):
    d = self._dict
    to_fetch = []
    ret = []
    for key in keys:
        ret.append(d.get(key, NO_VALUE))
        if ret[-1] is NO_VALUE:
            to_fetch.append((key, len(ret) - 1))
    if not to_fetch:
        return ret
    keys_to_fetch, indices = zip(*to_fetch)
    for key, index, value in zip(keys_to_fetch, indices, self.proxied.get_multi(keys_to_fetch)):
        if value is NO_VALUE:
            continue
        d[key] = ret[index] = value
    return ret
Proxy a ``get_multi`` call. This works like :py:meth:`.get`, except *keys* is a list of keys, and the result is a list of values. Parameters: keys: A list of :term:`native string` objects. Returns: list: The values corresponding to the *keys*.
https://github.com/weasyl/weasyl/blob/80c86942c6f20a815086e2895fdad51d3aa77eed/libweasyl/libweasyl/cache.py#L89-L116
import json import threading import dogpile.cache import dogpile.cache.backends.memcached import pylibmc from dogpile.cache.api import CachedValue, NO_VALUE from dogpile.cache.proxy import ProxyBackend from dogpile.cache import make_region region = make_region() class ThreadCacheProxy(ProxyBackend): _local = threading.local() @classmethod def zap_cache(cls): try: del cls._local.cache_dict except AttributeError: pass @property def _dict(self): if not hasattr(self._local, 'cache_dict'): self._local.cache_dict = {} return self._local.cache_dict def get(self, key): d = self._dict if key in d: return d[key] ret = self.proxied.get(key) if ret is not NO_VALUE: d[key] = ret return ret
Apache License 2.0
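A sketch of wiring the proxy into a dogpile.cache region via the standard wrap mechanism; it assumes the pre-1.0 dogpile backend API that this module targets:

from dogpile.cache import make_region
from libweasyl.cache import ThreadCacheProxy

region = make_region().configure(
    'dogpile.cache.memory',
    wrap=[ThreadCacheProxy],
)
region.set('greeting', 'hello')
print(region.get_multi(['greeting', 'missing-key']))   # second entry is NO_VALUE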
dapr/python-sdk
dapr/clients/grpc/client.py
DaprGrpcClient.wait
python
def wait(self, timeout_s: float):
    host_port_str = self._address.split(":")
    host_port = (host_port_str[0], int(host_port_str[1]))

    start = time.time()
    while True:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(timeout_s)
            try:
                s.connect(host_port)
                return
            except Exception as e:
                remaining = (start + timeout_s) - time.time()
                if remaining < 0:
                    raise e
                time.sleep(min(1, remaining))
Waits for the sidecar to be available within the timeout. It checks whether the sidecar socket is reachable within the given timeout. Example of waiting for the sidecar before making calls: from dapr.clients import DaprClient with DaprClient() as d: d.wait(1000) # waits up to 1000 seconds. # Sidecar is available after this. Args: timeout_s (float): timeout in seconds
https://github.com/dapr/python-sdk/blob/3ac8416559338dffb04b900d4ebdd201a2672960/dapr/clients/grpc/client.py#L791-L821
import time import socket import grpc from grpc import ( UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, StreamUnaryClientInterceptor, StreamStreamClientInterceptor ) from dapr.clients.grpc._state import StateOptions, StateItem from typing import Dict, Optional, Union, Sequence, List from google.protobuf.message import Message as GrpcMessage from google.protobuf.empty_pb2 import Empty as GrpcEmpty from dapr.conf import settings from dapr.proto import api_v1, api_service_v1, common_v1 from dapr.clients.grpc._helpers import MetadataTuple, DaprClientInterceptor, to_bytes from dapr.clients.grpc._request import ( InvokeMethodRequest, BindingRequest, TransactionalStateOperation, ) from dapr.clients.grpc._response import ( BindingResponse, DaprResponse, GetSecretResponse, GetBulkSecretResponse, InvokeMethodResponse, StateResponse, BulkStatesResponse, BulkStateItem, ) from urllib.parse import urlencode class DaprGrpcClient: def __init__( self, address: Optional[str] = None, interceptors: Optional[List[Union[ UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, StreamUnaryClientInterceptor, StreamStreamClientInterceptor]]] = None): if not address: address = f"{settings.DAPR_RUNTIME_HOST}:{settings.DAPR_GRPC_PORT}" self._address = address self._channel = grpc.insecure_channel(address) if settings.DAPR_API_TOKEN: api_token_interceptor = DaprClientInterceptor([ ('dapr-api-token', settings.DAPR_API_TOKEN), ]) self._channel = grpc.intercept_channel( self._channel, api_token_interceptor) if interceptors: self._channel = grpc.intercept_channel( self._channel, *interceptors) self._stub = api_service_v1.DaprStub(self._channel) def close(self): if self._channel: self._channel.close() def __del__(self): self.close() def __enter__(self) -> 'DaprGrpcClient': return self def __exit__(self, exc_type, exc_value, traceback) -> None: self.close() def _get_http_extension( self, http_verb: str, http_querystring: Optional[MetadataTuple] = () ) -> common_v1.HTTPExtension: verb = common_v1.HTTPExtension.Verb.Value(http_verb) http_ext = common_v1.HTTPExtension(verb=verb) if http_querystring is not None and len(http_querystring): http_ext.querystring = urlencode(http_querystring) return http_ext def invoke_method( self, app_id: str, method_name: str, data: Union[bytes, str, GrpcMessage], content_type: Optional[str] = None, metadata: Optional[MetadataTuple] = None, http_verb: Optional[str] = None, http_querystring: Optional[MetadataTuple] = None) -> InvokeMethodResponse: req_data = InvokeMethodRequest(data, content_type) http_ext = None if http_verb: http_ext = self._get_http_extension(http_verb, http_querystring) content_type = "" if req_data.content_type: content_type = req_data.content_type req = api_v1.InvokeServiceRequest( id=app_id, message=common_v1.InvokeRequest( method=method_name, data=req_data.proto, content_type=content_type, http_extension=http_ext) ) response, call = self._stub.InvokeService.with_call(req, metadata=metadata) resp_data = InvokeMethodResponse(response.data, response.content_type) resp_data.headers = call.initial_metadata() return resp_data def invoke_binding( self, binding_name: str, operation: str, data: Union[bytes, str], binding_metadata: Dict[str, str] = {}, metadata: Optional[MetadataTuple] = ()) -> BindingResponse: req_data = BindingRequest(data, binding_metadata) req = api_v1.InvokeBindingRequest( name=binding_name, data=req_data.data, metadata=req_data.binding_metadata, operation=operation ) response, call = self._stub.InvokeBinding.with_call(req, metadata=metadata) 
return BindingResponse( response.data, dict(response.metadata), call.initial_metadata()) def publish_event( self, pubsub_name: str, topic_name: str, data: Union[bytes, str], metadata: Optional[MetadataTuple] = (), data_content_type: Optional[str] = None) -> DaprResponse: if not isinstance(data, bytes) and not isinstance(data, str): raise ValueError(f'invalid type for data {type(data)}') req_data: bytes if isinstance(data, bytes): req_data = data else: if isinstance(data, str): req_data = data.encode('utf-8') content_type = "" if data_content_type: content_type = data_content_type req = api_v1.PublishEventRequest( pubsub_name=pubsub_name, topic=topic_name, data=req_data, data_content_type=content_type) _, call = self._stub.PublishEvent.with_call(req, metadata=metadata) return DaprResponse(call.initial_metadata()) def get_state( self, store_name: str, key: str, state_metadata: Optional[Dict[str, str]] = dict(), metadata: Optional[MetadataTuple] = ()) -> StateResponse: if not store_name or len(store_name) == 0 or len(store_name.strip()) == 0: raise ValueError("State store name cannot be empty") req = api_v1.GetStateRequest(store_name=store_name, key=key, metadata=state_metadata) response, call = self._stub.GetState.with_call(req, metadata=metadata) return StateResponse( data=response.data, etag=response.etag, headers=call.initial_metadata()) def get_bulk_state( self, store_name: str, keys: Sequence[str], parallelism: int = 1, states_metadata: Optional[Dict[str, str]] = dict(), metadata: Optional[MetadataTuple] = ()) -> BulkStatesResponse: if not store_name or len(store_name) == 0 or len(store_name.strip()) == 0: raise ValueError("State store name cannot be empty") req = api_v1.GetBulkStateRequest( store_name=store_name, keys=keys, parallelism=parallelism, metadata=states_metadata) response, call = self._stub.GetBulkState.with_call(req, metadata=metadata) items = [] for item in response.items: items.append( BulkStateItem( key=item.key, data=item.data, etag=item.etag, error=item.error)) return BulkStatesResponse( items=items, headers=call.initial_metadata()) def save_state( self, store_name: str, key: str, value: Union[bytes, str], etag: Optional[str] = None, options: Optional[StateOptions] = None, state_metadata: Optional[Dict[str, str]] = dict(), metadata: Optional[MetadataTuple] = ()) -> DaprResponse: if not isinstance(value, (bytes, str)): raise ValueError(f'invalid type for data {type(value)}') req_value = value if not store_name or len(store_name) == 0 or len(store_name.strip()) == 0: raise ValueError("State store name cannot be empty") if options is None: state_options = None else: state_options = options.get_proto() state = common_v1.StateItem( key=key, value=to_bytes(req_value), etag=common_v1.Etag(value=etag) if etag is not None else None, options=state_options, metadata=state_metadata) req = api_v1.SaveStateRequest(store_name=store_name, states=[state]) _, call = self._stub.SaveState.with_call(req, metadata=metadata) return DaprResponse( headers=call.initial_metadata()) def save_bulk_state( self, store_name: str, states: List[StateItem], metadata: Optional[MetadataTuple] = ()) -> DaprResponse: if not states or len(states) == 0: raise ValueError("States to be saved cannot be empty") if not store_name or len(store_name) == 0 or len(store_name.strip()) == 0: raise ValueError("State store name cannot be empty") req_states = [common_v1.StateItem( key=i.key, value=to_bytes(i.value), etag=common_v1.Etag(value=i.etag) if i.etag is not None else None, options=i.options, metadata=i.metadata) 
for i in states] req = api_v1.SaveStateRequest(store_name=store_name, states=req_states) _, call = self._stub.SaveState.with_call(req, metadata=metadata) return DaprResponse( headers=call.initial_metadata()) def execute_state_transaction( self, store_name: str, operations: Sequence[TransactionalStateOperation], transactional_metadata: Optional[Dict[str, str]] = dict(), metadata: Optional[MetadataTuple] = ()) -> DaprResponse: if not store_name or len(store_name) == 0 or len(store_name.strip()) == 0: raise ValueError("State store name cannot be empty") req_ops = [api_v1.TransactionalStateOperation( operationType=o.operation_type.value, request=common_v1.StateItem( key=o.key, value=to_bytes(o.data), etag=common_v1.Etag(value=o.etag) if o.etag is not None else None)) for o in operations] req = api_v1.ExecuteStateTransactionRequest( storeName=store_name, operations=req_ops, metadata=transactional_metadata) _, call = self._stub.ExecuteStateTransaction.with_call(req, metadata=metadata) return DaprResponse( headers=call.initial_metadata()) def delete_state( self, store_name: str, key: str, etag: Optional[str] = None, options: Optional[StateOptions] = None, state_metadata: Optional[Dict[str, str]] = dict(), metadata: Optional[MetadataTuple] = ()) -> DaprResponse: if not store_name or len(store_name) == 0 or len(store_name.strip()) == 0: raise ValueError("State store name cannot be empty") if options is None: state_options = None else: state_options = options.get_proto() etag_object = common_v1.Etag(value=etag) if etag is not None else None req = api_v1.DeleteStateRequest(store_name=store_name, key=key, etag=etag_object, options=state_options, metadata=state_metadata) _, call = self._stub.DeleteState.with_call(req, metadata=metadata) return DaprResponse( headers=call.initial_metadata()) def get_secret( self, store_name: str, key: str, secret_metadata: Optional[Dict[str, str]] = {}, metadata: Optional[MetadataTuple] = ()) -> GetSecretResponse: req = api_v1.GetSecretRequest( store_name=store_name, key=key, metadata=secret_metadata) response, call = self._stub.GetSecret.with_call(req, metadata=metadata) return GetSecretResponse( secret=response.data, headers=call.initial_metadata()) def get_bulk_secret( self, store_name: str, secret_metadata: Optional[Dict[str, str]] = {}, metadata: Optional[MetadataTuple] = ()) -> GetBulkSecretResponse: req = api_v1.GetBulkSecretRequest( store_name=store_name, metadata=secret_metadata) response, call = self._stub.GetBulkSecret.with_call(req, metadata=metadata) secrets_map = {} for key in response.data.keys(): secret_response = response.data[key] secrets_submap = {} for subkey in secret_response.secrets.keys(): secrets_submap[subkey] = secret_response.secrets[subkey] secrets_map[key] = secrets_submap return GetBulkSecretResponse( secrets=secrets_map, headers=call.initial_metadata())
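A minimal usage sketch for the DaprGrpcClient defined in the context above; it assumes a Dapr sidecar is running locally and that a state store component named 'statestore' is configured (both are assumptions, not part of this record):

# Hedged usage sketch: requires a running Dapr sidecar and a state store
# component named 'statestore' (illustrative name).
from dapr.clients.grpc.client import DaprGrpcClient

with DaprGrpcClient() as client:
    # Save a single key and read it back from the (assumed) 'statestore' component.
    client.save_state(store_name='statestore', key='order_1', value=b'pending')
    resp = client.get_state(store_name='statestore', key='order_1')
    print(resp.data)  # b'pending'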
MIT License
google/deluca
deluca/lung/envs/_generalized_stitched_sim_open_loop.py
loop_over_loader
python
def loop_over_loader(model_optimState_lrMult_loss, X_Y, optim, rollout, scheduler):
    X_batch, y_batch = X_Y
    model, optim_state, lr_mult, loss = model_optimState_lrMult_loss
    loss, grad = jax.value_and_grad(map_rollout_over_batch)(model, (X_batch, y_batch), rollout)
    updates, optim_state = optim.update(grad, optim_state, model)
    if scheduler == "ReduceLROnPlateau":
        updates = jax.tree_map(lambda g: lr_mult * g, updates)
    model = optax.apply_updates(model, updates)
    return (model, optim_state, lr_mult, loss), None
rollout has signature (model, data) -> loss where data.shape = (2, N)
X_batch.shape = Y_batch.shape = (num_batches, batch_size, N=29)
lrMult is the multiplier for the scheduler
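The (carry, None) return value suggests this function is written as the body of a jax.lax.scan over the batched data; the wiring below is a hedged sketch of that use, where model, X, Y and the rollout function are assumed to come from the surrounding training setup, and the optax optimizer plus the initial lr_mult and loss values are illustrative choices:

# Hedged sketch: scanning loop_over_loader over pre-batched data, assuming
# X.shape == Y.shape == (num_batches, batch_size, N) as the docstring states.
from functools import partial
import jax
import optax

optim = optax.adamw(learning_rate=1e-3)          # illustrative optimizer choice
optim_state = optim.init(model)                  # 'model' comes from the simulator setup
scan_body = partial(loop_over_loader, optim=optim, rollout=rollout,
                    scheduler="ReduceLROnPlateau")
init_carry = (model, optim_state, 1.0, 0.0)      # (model, optim_state, lr_mult, loss)
(model, optim_state, lr_mult, loss), _ = jax.lax.scan(scan_body, init_carry, (X, Y))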
https://github.com/google/deluca/blob/9fdcb9b382cae2ff9d8c7600469d2c6f1a128d1c/deluca/lung/envs/_generalized_stitched_sim_open_loop.py#L295-L310
from functools import partial from absl import logging from typing import Dict, Any import time import os import jax import jax.numpy as jnp import flax.linen as nn import optax import copy from flax.metrics import tensorboard import deluca.core from deluca.lung.core import LungEnv from deluca.lung.utils.data.transform import ShiftScaleTransform from deluca.lung.utils.nn import SNN from deluca.lung.utils.nn import MLP from deluca.lung.utils.nn import CNN from deluca.lung.utils.nn import LSTM from deluca.lung.utils.nn import ShallowBoundaryModel from google3.pyglib import gfile class StitchedSimObservation(deluca.Obj): predicted_pressure: float = 0.0 time: float = 0.0 class SimulatorState(deluca.Obj): u_history: jnp.ndarray p_history: jnp.ndarray t_in: int = 0 steps: int = 0 predicted_pressure: float = 0.0 class GeneralizedStitchedSim_open_loop(LungEnv): params: list = deluca.field(jaxed=True) init_rng: jnp.array = deluca.field(jaxed=False) u_window: int = deluca.field(5, jaxed=False) p_window: int = deluca.field(3, jaxed=False) u_history_len: int = deluca.field(5, jaxed=False) p_history_len: int = deluca.field(5, jaxed=False) u_scaler: ShiftScaleTransform = deluca.field(jaxed=False) p_scaler: ShiftScaleTransform = deluca.field(jaxed=False) to_round: bool = deluca.field(False, jaxed=False) seed: int = deluca.field(0, jaxed=False) flow: int = deluca.field(0, jaxed=False) transition_threshold: int = deluca.field(0, jaxed=False) default_model_parameters: Dict[str, Any] = deluca.field(jaxed=False) num_boundary_models: int = deluca.field(5, jaxed=False) boundary_out_dim: int = deluca.field(1, jaxed=False) boundary_hidden_dim: int = deluca.field(100, jaxed=False) reset_scaled_peep: float = deluca.field(0.0, jaxed=False) default_model_name: str = deluca.field("SNN", jaxed=False) default_model: nn.module = deluca.field(jaxed=False) boundary_models: list = deluca.field(default_factory=list, jaxed=False) ensemble_models: list = deluca.field(default_factory=list, jaxed=False) def setup(self): self.u_history_len = max(self.u_window, self.num_boundary_models) self.p_history_len = max(self.p_window, self.num_boundary_models) if self.default_model_name == "SNN": self.default_model = SNN( out_dim=self.default_model_parameters["out_dim"], hidden_dim=self.default_model_parameters["hidden_dim"], n_layers=self.default_model_parameters["n_layers"], droprate=self.default_model_parameters["droprate"]) elif self.default_model_name == "MLP": self.default_model = MLP( hidden_dim=self.default_model_parameters["hidden_dim"], out_dim=self.default_model_parameters["out_dim"], n_layers=self.default_model_parameters["n_layers"], droprate=self.default_model_parameters["droprate"], activation_fn=self.default_model_parameters["activation_fn"]) elif self.default_model_name == "CNN": self.default_model = CNN( n_layers=self.default_model_parameters["n_layers"], out_channels=self.default_model_parameters["out_channels"], kernel_size=self.default_model_parameters["kernel_size"], strides=self.default_model_parameters["strides"], out_dim=self.default_model_parameters["out_dim"], activation_fn=self.default_model_parameters["activation_fn"]) elif self.default_model_name == "LSTM": self.default_model = LSTM( n_layers=self.default_model_parameters["n_layers"], hidden_dim=self.default_model_parameters["hidden_dim"], out_dim=self.default_model_parameters["out_dim"], bptt=self.default_model_parameters["bptt"], activation_fn=self.default_model_parameters["activation_fn"]) if self.default_model_name == "SNN" or self.default_model_name == 
"MLP": default_params = self.default_model.init( jax.random.PRNGKey(0), jnp.ones([self.u_history_len + self.p_history_len]))["params"] elif self.default_model_name == "CNN": default_params = self.default_model.init( jax.random.PRNGKey(0), jnp.ones([1, self.u_history_len + self.p_history_len, 1]))["params"] logging.info('[1, self.u_history_len + self.p_history_len, 1]:' + str([1, self.u_history_len + self.p_history_len, 1])) elif self.default_model_name == "LSTM": default_params = self.default_model.init( jax.random.PRNGKey(0), jnp.ones((1, self.u_history_len + self.p_history_len, 1)))["params"] self.boundary_models = [ ShallowBoundaryModel( out_dim=self.boundary_out_dim, hidden_dim=self.boundary_hidden_dim, model_num=i + 1) for i in range(self.num_boundary_models) ] boundary_params = [ self.boundary_models[i].init( jax.random.PRNGKey(0), jnp.ones([self.u_history_len + self.p_history_len]))["params"] for i in range(self.num_boundary_models) ] self.ensemble_models = self.boundary_models + [self.default_model] if self.params is None: self.params = boundary_params + [default_params] logging.info("TREE MAP") logging.info(jax.tree_map(lambda x: x.shape, self.params)) def reset(self): scaled_peep = self.reset_scaled_peep state = SimulatorState( u_history=jnp.zeros([self.u_history_len]), p_history=jnp.hstack( [jnp.zeros([self.p_history_len-1]), jnp.array([scaled_peep])]), t_in=0, steps=0, predicted_pressure=scaled_peep, ) state = self.sync_unscaled_pressure(state) obs = StitchedSimObservation( predicted_pressure=state.predicted_pressure, time=self.time(state)) return state, obs def update_history(self, history, value): history = jnp.roll(history, shift=-1) history = history.at[-1].set(value) return history def sync_unscaled_pressure(self, state): scaled_pressure = self.p_scaler.inverse(state.predicted_pressure).squeeze() return state.replace(predicted_pressure=scaled_pressure) def __call__(self, state, action): u_in, u_out = action model_idx = jnp.min(jnp.array([state.t_in, self.num_boundary_models])) funcs = [ partial(self.ensemble_models[i].apply, {"params": self.params[i]}) for i in range(self.num_boundary_models + 1) ] def true_func(state): def call_if_t_in_positive(state): new_state, _ = self.reset() return new_state state = jax.lax.cond(state.t_in > 0, call_if_t_in_positive, lambda x: x, state) return state def false_func(state, u_in, model_idx): if self.to_round: u_in_scaled = self.u_scaler(jnp.round(u_in)).squeeze() else: u_in_scaled = self.u_scaler(u_in).squeeze() state = state.replace( u_history=self.update_history(state.u_history, u_in_scaled), t_in=state.t_in + 1) boundary_features = jnp.hstack([ state.u_history[-self.u_history_len:], state.p_history[-self.p_history_len:] ]) default_features = jnp.hstack( [state.u_history[-self.u_window:], state.p_history[-self.p_window:]]) default_pad_len = (self.u_history_len + self.p_history_len) - ( self.u_window + self.p_window) default_features = jnp.hstack( [jnp.zeros((default_pad_len,)), default_features]) if self.default_model_name == "CNN": boundary_features = jnp.expand_dims(boundary_features, axis=[0, 2]) default_features = jnp.expand_dims(default_features, axis=[0, 2]) if self.default_model_name == "LSTM": boundary_features = jnp.expand_dims(boundary_features, axis=[0, 2]) default_features = jnp.expand_dims(default_features, axis=[0, 2]) features = jax.lax.cond( model_idx == self.num_boundary_models, lambda x: default_features, lambda x: boundary_features, None, ) if self.default_model_name != "LSTM": scaled_pressure = 
jax.lax.switch(model_idx, funcs, features) else: scaled_pressure = jax.lax.switch(model_idx, funcs, features) scaled_pressure = scaled_pressure.astype(jnp.float64) state = state.replace(predicted_pressure=scaled_pressure) new_p_history = self.update_history(state.p_history, scaled_pressure) state = state.replace( p_history=new_p_history ) state = self.sync_unscaled_pressure(state) return state partial_false_func = partial(false_func, u_in=u_in, model_idx=model_idx) state = jax.lax.cond(u_out == 1, true_func, partial_false_func, state) state = state.replace(steps=state.steps + 1) obs = StitchedSimObservation( predicted_pressure=state.predicted_pressure, time=self.time(state)) return state, obs def get_X_y_for_next_epoch_tf(loader, batch_size): loader = loader.shuffle(buffer_size=len(loader), seed=0) loader = loader.batch(batch_size=batch_size, drop_remainder=True) loader_list = list(loader) unzipped_loader = list(zip(*loader_list)) X = jnp.array([jnp.array(z.numpy()) for z in unzipped_loader[0]]) y = jnp.array([jnp.array(z.numpy()) for z in unzipped_loader[1]]) return X, y def rollout(model, data): start_idx = 0 end_idx = len(data[0])-1 state, _ = model.reset() new_u_history = jnp.zeros((model.u_history_len,)) new_p_history = jnp.zeros((model.p_history_len,)) state = state.replace(u_history=new_u_history, p_history=new_p_history) loss_init = jnp.abs(model.p_scaler(state.predicted_pressure) - model.p_scaler(data[1, 0])) state_loss_init = (state, loss_init) def predict_and_update_state(i, state_loss): state, loss = state_loss u_in, pressure = data[0, i], data[1, i] def true_func(state, u_in, pressure): new_p_history = state.p_history.at[-1].set( model.p_scaler(pressure).squeeze()) state = state.replace(p_history=new_p_history) next_state, _ = model(state=state, action=(u_in, 0)) return next_state def false_func(state, u_in): next_state, _ = model(state=state, action=(u_in, 0)) return next_state partial_true_func = partial(true_func, u_in=u_in, pressure=pressure) partial_false_func = partial(false_func, u_in=u_in) next_state = jax.lax.cond(i >= model.transition_threshold, partial_true_func, partial_false_func, state) pred = model.p_scaler(next_state.predicted_pressure) return (next_state, loss + jnp.abs(model.p_scaler(data[1, i + 1]) - pred)) (state, total_loss) = jax.lax.fori_loop(start_idx, end_idx, predict_and_update_state, state_loss_init) '''state_loss = state_loss_init for i in range(start_idx, end_idx): state_loss = predict_and_update_state(i, state_loss) (_, total_loss) = state_loss''' return total_loss / len(data[0])
Apache License 2.0
pyansys/pyaedt
pyaedt/siwave.py
Siwave.project_path
python
def project_path(self):
    return os.path.normpath(self.oSiwave.GetProjectDirectory())
Project path.

Returns
-------
str
    Full absolute path for the project.
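A brief usage sketch, assuming a local ANSYS SIwave installation on Windows so the COM object can be created; the version string is only an example:

# Hedged usage sketch: requires a local ANSYS SIwave installation (Windows/COM).
from pyaedt.siwave import Siwave

siwave = Siwave(specified_version="2021.1")   # version string is illustrative
print(siwave.project_name)                    # name of the active project
print(siwave.project_path)                    # normalized absolute project directory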
https://github.com/pyansys/pyaedt/blob/817c7d706a2d10942470ccac959645e16e9ea971/pyaedt/siwave.py#L159-L168
from __future__ import absolute_import from .generic.general_methods import aedt_exception_handler import os import sys import pkgutil import time from .misc import list_installed_ansysem from pyaedt import is_ironpython, _pythonver if is_ironpython: import clr _com = "pythonnet" import System elif os.name == "nt": modules = [tup[1] for tup in pkgutil.iter_modules()] if "clr" in modules: import clr import win32com.client _com = "pythonnet_v3" elif "win32com" in modules: import win32com.client _com = "pywin32" else: raise Exception("Error. No win32com.client or Python.NET modules found. They need to be installed.") class Siwave: @property def version_keys(self): self._version_keys = [] self._version_ids = {} version_list = list_installed_ansysem() for version_env_var in version_list: current_version_id = version_env_var.replace("ANSYSEM_ROOT", "").replace("ANSYSEMSV_ROOT", "") version = int(current_version_id[0:2]) release = int(current_version_id[2]) if version < 20: if release < 3: version -= 1 else: release -= 2 v_key = "20{0}.{1}".format(version, release) self._version_keys.append(v_key) self._version_ids[v_key] = version_env_var return self._version_keys @property def current_version(self): return self.version_keys[0] def __init__(self, specified_version=None): self._main = sys.modules["__main__"] print("Launching Siwave Init") if "oSiwave" in dir(self._main) and self._main.oSiwave is not None: self._main.AEDTVersion = self._main.oSiwave.GetVersion()[0:6] self._main.oSiwave.RestoreWindow() specified_version = self.current_version assert specified_version in self.version_keys, "Specified version {} not known.".format(specified_version) version_key = specified_version base_path = os.getenv(self._version_ids[specified_version]) self._main.sDesktopinstallDirectory = base_path else: if specified_version: assert specified_version in self.version_keys, "Specified version {} not known.".format( specified_version ) version_key = specified_version else: version_key = self.current_version base_path = os.getenv(self._version_ids[version_key]) self._main = sys.modules["__main__"] self._main.sDesktopinstallDirectory = base_path version = "Siwave.Application." + version_key self._main.AEDTVersion = version_key self._main.interpreter = _com self._main.interpreter_ver = _pythonver if "oSiwave" in dir(self._main): del self._main.oSiwave if _com == "pythonnet": self._main.oSiwave = System.Activator.CreateInstance(System.Type.GetTypeFromProgID(version)) elif _com == "pythonnet_v3": print("Launching Siwave with Module win32com") self._main.oSiwave = win32com.client.Dispatch("Siwave.Application.2021.1") self._main.AEDTVersion = version_key self.oSiwave = self._main.oSiwave self._main.oSiwave.RestoreWindow() self._main.siwave_initialized = True self._oproject = self.oSiwave.GetActiveProject() pass @property def project_name(self): return self._oproject.GetName() @property
MIT License
iterative/dvc
dvc/fs/base.py
BaseFileSystem.walk_files
python
def walk_files(self, path_info, **kwargs):
    raise NotImplementedError
Return a generator with `PathInfo`s to all the files.

Optional kwargs:
    prefix (bool): If true `path_info` will be treated as a prefix
        rather than directory path.
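Because the base class only raises NotImplementedError, concrete filesystems override this method; the subclass below is an illustrative sketch of such an override for a local directory tree, not DVC's actual LocalFileSystem:

# Illustrative subclass only -- not DVC's real LocalFileSystem implementation.
import os
from dvc.fs.base import BaseFileSystem
from dvc.path_info import PathInfo


class ToyLocalFileSystem(BaseFileSystem):
    scheme = "local"

    def walk_files(self, path_info, **kwargs):
        # Yield a PathInfo for every file found under the given directory.
        for root, _dirs, files in os.walk(str(path_info)):
            for name in files:
                yield PathInfo(root) / name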
https://github.com/iterative/dvc/blob/3a100382bc5d50a4f1243b1c5d894bb5d7058dbf/dvc/fs/base.py#L169-L176
import contextlib import logging import os from concurrent.futures import ThreadPoolExecutor, as_completed from functools import partialmethod from multiprocessing import cpu_count from typing import Any, ClassVar, Dict, FrozenSet, Optional from tqdm.utils import CallbackIOWrapper from dvc.exceptions import DvcException from dvc.path_info import URLInfo from dvc.progress import DEFAULT_CALLBACK, FsspecCallback from dvc.ui import ui from dvc.utils import tmp_fname from dvc.utils.fs import makedirs, move logger = logging.getLogger(__name__) class RemoteCmdError(DvcException): def __init__(self, remote, cmd, ret, err): super().__init__( "{remote} command '{cmd}' finished with non-zero return code" " {ret}': {err}".format(remote=remote, cmd=cmd, ret=ret, err=err) ) class RemoteActionNotImplemented(DvcException): def __init__(self, action, scheme): m = f"{action} is not supported for {scheme} remotes" super().__init__(m) class RemoteMissingDepsError(DvcException): pass class BaseFileSystem: sep = "/" scheme = "base" REQUIRES: ClassVar[Dict[str, str]] = {} PATH_CLS = URLInfo _JOBS = 4 * cpu_count() CHECKSUM_DIR_SUFFIX = ".dir" HASH_JOBS = max(1, min(4, cpu_count() // 2)) LIST_OBJECT_PAGE_SIZE = 1000 TRAVERSE_WEIGHT_MULTIPLIER = 5 TRAVERSE_PREFIX_LEN = 3 TRAVERSE_THRESHOLD_SIZE = 500000 CAN_TRAVERSE = True CHUNK_SIZE = 64 * 1024 * 1024 PARAM_CHECKSUM: ClassVar[Optional[str]] = None DETAIL_FIELDS: FrozenSet[str] = frozenset() def __init__(self, **kwargs): self._check_requires(**kwargs) self.jobs = kwargs.get("jobs") or self._JOBS self.hash_jobs = kwargs.get("checksum_jobs") or self.HASH_JOBS self._config = kwargs @property def config(self): return self._config @classmethod def _strip_protocol(cls, path: str): return path @staticmethod def _get_kwargs_from_urls(urlpath): return {} @classmethod def get_missing_deps(cls): import importlib missing = [] for package, module in cls.REQUIRES.items(): try: importlib.import_module(module) except ImportError: missing.append(package) return missing def _check_requires(self, **kwargs): from ..scheme import Schemes from ..utils import format_link from ..utils.pkg import PKG missing = self.get_missing_deps() if not missing: return url = kwargs.get("url", f"{self.scheme}://") scheme = self.scheme if scheme == Schemes.WEBDAVS: scheme = Schemes.WEBDAV by_pkg = { "pip": f"pip install 'dvc[{scheme}]'", "conda": f"conda install -c conda-forge dvc-{scheme}", } cmd = by_pkg.get(PKG) if cmd: link = format_link("https://dvc.org/doc/install") hint = ( f"To install dvc with those dependencies, run:\n" "\n" f"\t{cmd}\n" "\n" f"See {link} for more info." ) else: link = format_link("https://github.com/iterative/dvc/issues") hint = f"Please report this bug to {link}. Thank you!" raise RemoteMissingDepsError( f"URL '{url}' is supported but requires these missing " f"dependencies: {missing}. {hint}" ) def checksum(self, path_info) -> str: raise NotImplementedError def open(self, path_info, mode: str = "r", encoding: str = None, **kwargs): raise RemoteActionNotImplemented("open", self.scheme) def exists(self, path_info) -> bool: raise NotImplementedError def isdir(self, path_info): return False def isfile(self, path_info): return True def isexec(self, path_info): return False def iscopy(self, path_info): return False def walk(self, top, topdown=True, onerror=None, **kwargs): raise NotImplementedError
Apache License 2.0
riotgames/cloud-inquisitor
backend/cloud_inquisitor/utils.py
to_camelcase
python
def to_camelcase(inStr):
    return re.sub('_([a-z])', lambda x: x.group(1).upper(), inStr)
Converts a string from snake_case to camelCase

>>> to_camelcase('convert_to_camel_case')
'convertToCamelCase'

Args:
    inStr (str): String to convert

Returns:
    String formatted as camelCase
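A small usage sketch; converting payload keys for a JSON API response is a typical use, and the dict comprehension below is only an illustration, not a helper provided by the module:

from cloud_inquisitor.utils import to_camelcase

# Convert snake_case keys of a payload to camelCase before returning it as JSON.
payload = {'account_id': 42, 'resource_type': 'ec2_instance'}
camel_payload = {to_camelcase(k): v for k, v in payload.items()}
print(camel_payload)  # {'accountId': 42, 'resourceType': 'ec2_instance'}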
https://github.com/riotgames/cloud-inquisitor/blob/29a26c705381fdba3538b4efedb25b9e09b387ed/backend/cloud_inquisitor/utils.py#L380-L392
import binascii import hashlib import json import logging import os import random import re import string import time import zlib from base64 import b64decode from collections import namedtuple from copy import deepcopy from datetime import datetime from difflib import Differ from functools import wraps import boto3.session import jwt import munch import pkg_resources import requests from argon2 import PasswordHasher from dateutil import parser from jinja2 import Environment, BaseLoader from cloud_inquisitor.constants import RGX_EMAIL_VALIDATION_PATTERN, RGX_BUCKET, ROLE_ADMIN, DEFAULT_CONFIG, CONFIG_FILE_PATHS from cloud_inquisitor.exceptions import InquisitorError __jwt_data = None log = logging.getLogger(__name__) NotificationContact = namedtuple('NotificationContact', ('type', 'value')) class MenuItem(object): def __init__(self, group=None, name=None, state=None, active=None, section=None, args=None, order=100): self.group = group self.name = name self.state = state self.active = active self.section = section self.args = args or {} self.order = order def to_json(self): return { 'group': self.group, 'name': self.name, 'state': self.state, 'active': self.active, 'section': self.section, 'args': self.args or {}, 'order': self.order } def deprecated(msg): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): logging.getLogger(__name__).warning(msg) return func(*args, **kwargs) return wrapper return decorator def get_hash(data): return hashlib.sha256(str(data).encode('utf-8')).hexdigest() def is_truthy(value, default=False): if value is None: return False if isinstance(value, bool): return value if isinstance(value, int): return value > 0 trues = ('1', 'true', 'y', 'yes', 'ok') falses = ('', '0', 'false', 'n', 'none', 'no') if value.lower().strip() in falses: return False elif value.lower().strip() in trues: return True else: if default: return default else: raise ValueError('Invalid argument given to truthy: {0}'.format(value)) def validate_email(email, partial_match=False): rgx = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I) if partial_match: return rgx.search(email) is not None else: return rgx.match(email) is not None def get_template(template): from cloud_inquisitor.database import db tmpl = db.Template.find_one(template_name=template) if not tmpl: raise InquisitorError('No such template found: {}'.format(template)) tmplenv = Environment(loader=BaseLoader, autoescape=True) tmplenv.filters['json_loads'] = json.loads tmplenv.filters['slack_quote_join'] = lambda data: ', '.join('`{}`'.format(x) for x in data) return tmplenv.from_string(tmpl.template) def parse_bucket_info(domain): match = RGX_BUCKET.match(domain) if match: data = match.groupdict() return data['bucket'], data['region'] or 'us-east-1' def to_utc_date(date): return datetime.utcfromtimestamp(float(date.strftime('%s'))).replace(tzinfo=None) if date else None def isoformat(date): return date.isoformat() if date else None def generate_password(length=32): return ''.join(random.SystemRandom().choice(string.ascii_letters + '!@#$+.,') for _ in range(length)) def generate_csrf_token(): return binascii.hexlify(os.urandom(32)).decode() def hash_password(password): return PasswordHasher().hash(password) def generate_jwt_token(user, authsys, **kwargs): from cloud_inquisitor.config import dbconfig token = { 'auth_system': authsys, 'exp': time.time() + dbconfig.get('session_expire_time'), 'roles': [role.name for role in user.roles] } if kwargs: token.update(**kwargs) enc = jwt.encode(token, get_jwt_key_data(), 
algorithm='HS512') return enc.decode() def get_jwt_key_data(): global __jwt_data if __jwt_data: return __jwt_data from cloud_inquisitor import config_path from cloud_inquisitor.config import dbconfig jwt_key_file = dbconfig.get('jwt_key_file_path', default='ssl/private.key') if not os.path.isabs(jwt_key_file): jwt_key_file = os.path.join(config_path, jwt_key_file) with open(os.path.join(jwt_key_file), 'r') as f: __jwt_data = f.read() return __jwt_data def has_access(user, required_roles, match_all=True): if ROLE_ADMIN in user.roles: return True if isinstance(required_roles, str): if required_roles in user.roles: return True return False if match_all: for role in required_roles: if role not in user.roles: return False return True else: for role in required_roles: if role in user.roles: return True return False def merge_lists(*args): out = {} for contacts in filter(None, args): for contact in contacts: out[contact.value] = contact return list(out.values())
Apache License 2.0
harpribot/deep-summarization
models/simple.py
Simple._train_batch
python
def _train_batch(self, review, summary):
    feed_dict = {self.enc_inp[t]: review[t] for t in range(self.seq_length)}
    feed_dict.update({self.labels[t]: summary[t] for t in range(self.seq_length)})
    _, loss_t = self.sess.run([self.train_op, self.loss], feed_dict)
    return loss_t
Train a batch of the data

:param review: The input review data (X), shape [seq_length x batch_length]
:param summary: The target tip data (Y), shape [seq_length x batch_length]
:return: The loss for the trained batch
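The [seq_length x batch_length] convention means callers transpose row-major batches before passing them in, as the fit() loop in the context does; the standalone sketch below only illustrates that slicing with made-up array sizes:

import numpy as np

# Illustrative sizes only: 1000 samples, sequence length 29, batch size 64.
X_trn = np.zeros((1000, 29), dtype=np.int32)
Y_trn = np.zeros((1000, 29), dtype=np.int32)
batch_size, step = 64, 0

offset = (step * batch_size) % X_trn.shape[0]
batch_data = X_trn[offset:offset + batch_size, :].T    # (29, 64) = seq_length x batch_length
batch_labels = Y_trn[offset:offset + batch_size, :].T
print(batch_data.shape, batch_labels.shape)            # the shapes _train_batch expects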
https://github.com/harpribot/deep-summarization/blob/9b3bb1daae11a1db2386dbe4a71848714e6127f8/models/simple.py#L225-L239
import tensorflow as tf from models.sequenceNet import NeuralNet from abc import abstractmethod, ABCMeta import cPickle as Pickle import numpy as np import random from helpers.data2tensor import Mapper class Simple(NeuralNet): __metaclass__ = ABCMeta def __init__(self, review_summary_file, checkpointer, attention=False): self.test_review = None self.predicted_test_summary = None self.true_summary = None self.train_size = None self.test_size = None self.X = None self.Y = None self.prev_mem = None self.cell = None self.dec_outputs = None self.dec_memory = None self.labels = None self.loss = None self.weights = None self.optimizer = None self.train_op = None self.mapper_dict = None self.seq_length = None self.vocab_size = None self.momentum = None self.attention = attention self.review_summary_file = review_summary_file self.checkpointer = checkpointer self.enc_inp = None self.dec_inp = None self._load_data() super(Simple, self).__init__() @abstractmethod def get_cell(self): pass def _split_train_tst(self): num_samples = self.Y.shape[0] mapper_file = self.checkpointer.get_mapper_file_location() if not self.checkpointer.is_mapper_checkpointed(): print 'No mapper checkpoint found. Fresh loading in progress ...' sample_id = range(num_samples) random.shuffle(sample_id) print 'Dumping the mapper shuffle for reuse.' Pickle.dump(sample_id, open(mapper_file, 'wb')) print 'Dump complete. Moving Forward...' else: print 'Mapper Checkpoint found... Reading from mapper dump' sample_id = Pickle.load(open(mapper_file, 'rb')) print 'Mapping unpickling complete.. Moving forward...' self.X = self.X[sample_id] self.Y = self.Y[sample_id] test_fraction = 0.01 self.test_size = int(test_fraction * num_samples) self.train_size = num_samples - self.test_size self.X_trn = self.X[0:self.train_size] self.X_tst = self.X[self.train_size:num_samples] self.Y_trn = self.Y[0:self.train_size] self.Y_tst = self.Y[self.train_size:num_samples] def _load_data(self): self.mapper = Mapper() self.mapper.generate_vocabulary(self.review_summary_file) self.X, self.Y = self.mapper.get_tensor() self.mapper_dict = dict() self.mapper_dict['seq_length'] = self.mapper.get_seq_length() self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size() self.mapper_dict['rev_map'] = self.mapper.get_reverse_map() self._split_train_tst() def _load_data_graph(self): with tf.variable_scope("train_test", reuse=True): self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t) for t in range(self.seq_length)] self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t) for t in range(self.seq_length)] self.weights = [tf.ones_like(labels_t, dtype=tf.float32) for labels_t in self.labels] self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1]) def _load_model(self): self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim)) with tf.variable_scope("train_test", reuse=True): self.cell = self.get_cell() if not self.attention: with tf.variable_scope("train_test"): self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq( self.enc_inp, self.dec_inp, self.cell, self.vocab_size, self.vocab_size, self.seq_length) with tf.variable_scope("train_test", reuse=True): self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq( self.enc_inp, self.dec_inp, self.cell, self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True) else: with tf.variable_scope("train_test"): self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq( self.enc_inp, 
self.dec_inp, self.cell, self.vocab_size, self.vocab_size, self.seq_length) with tf.variable_scope("train_test", reuse=True): self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq( self.enc_inp, self.dec_inp, self.cell, self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True) def _load_optimizer(self): self.loss = tf.nn.seq2seq.sequence_loss(self.dec_outputs, self.labels, self.weights, self.vocab_size) self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate) self.train_op = self.optimizer.minimize(self.loss) def fit(self): step_file = self.checkpointer.get_step_file() start_step = Pickle.load(open(step_file, 'rb')) for step in xrange(start_step, self.train_size // self.train_batch_size): print 'Step No.:', step if step % self.checkpointer.get_checkpoint_steps() == 0: print 'Checkpointing: Saving Tensorflow variables' self.saver.save(self.sess, self.checkpointer.get_save_address()) Pickle.dump(step + 1, open(step_file, 'wb')) print 'Checkpointing Complete. Deleting historical checkpoints....' self.checkpointer.delete_previous_checkpoints(num_previous=2) print 'Deleted.. Moving forward...' offset = (step * self.train_batch_size) % self.train_size batch_data = self.X_trn[offset:(offset + self.train_batch_size), :].T batch_labels = self.Y_trn[offset:(offset + self.train_batch_size), :].T loss_t = self._train_batch(batch_data, batch_labels) print "Present Loss:", loss_t print 'Train Data Validation\n' self._visual_validate(self.X_trn[301, :], self.Y_trn[301, :]) print print print 'Test Data Validation\n' self._visual_validate(self.X_tst[56, :], self.Y_tst[56, :]) print print ''' if(step % self.checkpointer.get_prediction_checkpoint_steps() == 0): self.predict() self.store_test_predictions('_' + str(step)) '''
MIT License
cartus/dcgcn
sockeye/config.py
Config.__add_frozen
python
def __add_frozen(self):
    setattr(self, "_frozen", False)
    for attr, val in self.__dict__.items():
        if isinstance(val, Config):
            val.__add_frozen()
Adds _frozen attribute to this instance and all its child configurations.
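A short sketch of the freeze behaviour this helper enables; the ModelConfig subclass and its fields are made up for illustration, not taken from the repository:

from sockeye.config import Config


class ModelConfig(Config):
    # Illustrative child configuration, not part of the repository.
    def __init__(self, hidden_size, sub_config=None):
        super().__init__()              # __add_frozen() marks self and children unfrozen
        self.hidden_size = hidden_size
        self.sub_config = sub_config


config = ModelConfig(512, sub_config=ModelConfig(128))
config.hidden_size = 1024               # allowed while unfrozen
config.freeze()                         # recursively sets _frozen on all child configs
try:
    config.hidden_size = 2048
except AttributeError as err:
    print(err)                          # Cannot set 'hidden_size' in frozen config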
https://github.com/cartus/dcgcn/blob/af91fc787e0aed3ef20e143c2deba70c3c5f309a/sockeye/config.py#L90-L97
import copy import inspect import yaml class TaggedYamlObjectMetaclass(yaml.YAMLObjectMetaclass): def __init__(cls, name, bases, kwds): cls.yaml_tag = "!" + name new_kwds = {} new_kwds.update(kwds) new_kwds['yaml_tag'] = "!" + name super().__init__(name, bases, new_kwds) class Config(yaml.YAMLObject, metaclass=TaggedYamlObjectMetaclass): def __init__(self): self.__add_frozen() def __setattr__(self, key, value): if hasattr(self, '_frozen') and getattr(self, '_frozen'): raise AttributeError("Cannot set '%s' in frozen config" % key) if value == self: raise AttributeError("Cannot set self as attribute") object.__setattr__(self, key, value) def __setstate__(self, state): self.__dict__.update(state) init_signature = inspect.signature(self.__init__) for param_name, param in init_signature.parameters.items(): if param.default is not param.empty: if not hasattr(self, param_name): object.__setattr__(self, param_name, param.default) def freeze(self): if getattr(self, '_frozen'): return object.__setattr__(self, "_frozen", True) for k, v in self.__dict__.items(): if isinstance(v, Config) and k != "self": v.freeze() def __repr__(self): return "Config[%s]" % ", ".join("%s=%s" % (str(k), str(v)) for k, v in sorted(self.__dict__.items())) def __eq__(self, other): if type(other) is not type(self): return False for k, v in self.__dict__.items(): if k != "self": if k not in other.__dict__: return False if self.__dict__[k] != other.__dict__[k]: return False return True def __del_frozen(self): self.__delattr__('_frozen') for attr, val in self.__dict__.items(): if isinstance(val, Config) and hasattr(val, '_frozen'): val.__del_frozen()
MIT License
tcalmant/ipopo
pelix/http/basic.py
_RequestHandler.log_request
python
def log_request(self, code="-", size="-"):
    self._service.log(logging.DEBUG, '"%s" %s', self.requestline, code)
Logs a request to the server
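The override routes the access line through the HTTP service's log() at DEBUG level instead of BaseHTTPRequestHandler's default stderr logging; the stand-alone handler below mirrors the same idea with the standard logging module rather than the iPOPO service:

# Stand-alone illustration of the same override on a plain BaseHTTPRequestHandler;
# the iPOPO version delegates to its HTTP service instead of the logging module.
import logging
from http.server import BaseHTTPRequestHandler, HTTPServer

logger = logging.getLogger("http.access")


class QuietHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"ok")

    def log_request(self, code="-", size="-"):
        # Route the access line through logging at DEBUG level instead of stderr.
        logger.debug('"%s" %s', self.requestline, code)


# HTTPServer(("127.0.0.1", 8080), QuietHandler).serve_forever()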
https://github.com/tcalmant/ipopo/blob/1d4b81207e67890dfccc8f562336c7104f194c17/pelix/http/basic.py#L322-L326
import logging import socket import threading import traceback try: from http.server import HTTPServer from http.server import BaseHTTPRequestHandler from socketserver import ThreadingMixIn, TCPServer except ImportError: from BaseHTTPServer import HTTPServer from BaseHTTPServer import BaseHTTPRequestHandler from SocketServer import ThreadingMixIn, TCPServer from pelix.ipopo.decorators import ( ComponentFactory, Provides, Requires, Validate, Invalidate, Property, HiddenProperty, BindField, UpdateField, UnbindField, ) import pelix.ipopo.constants as constants import pelix.ipv6utils import pelix.utilities as utilities import pelix.misc.ssl_wrap as ssl_wrap import pelix.remote import pelix.http as http __version_info__ = (1, 0, 1) __version__ = ".".join(str(x) for x in __version_info__) __docformat__ = "restructuredtext en" HTTP_SERVICE_EXTRA = "http.extra" DEFAULT_BIND_ADDRESS = "0.0.0.0" LOCALHOST_ADDRESS = "127.0.0.1" class _HTTPServletRequest(http.AbstractHTTPServletRequest): def __init__(self, request_handler, prefix): self._handler = request_handler self._prefix = prefix self._sub_path = self._handler.path[len(prefix) :] if not self._sub_path.startswith("/"): self._sub_path = "/{0}".format(self._sub_path) while "//" in self._sub_path: self._sub_path = self._sub_path.replace("//", "/") def get_command(self): return self._handler.command def get_client_address(self): return self._handler.client_address def get_header(self, name, default=None): return self._handler.headers.get(name, default) def get_headers(self): return self._handler.headers def get_path(self): return self._handler.path def get_prefix_path(self): return self._prefix def get_sub_path(self): return self._sub_path def get_rfile(self): return self._handler.rfile class _HTTPServletResponse(http.AbstractHTTPServletResponse): def __init__(self, request_handler): self._handler = request_handler self._headers = {} def set_response(self, code, message=None): self._handler.send_response(code, message) def set_header(self, name, value): self._headers[name.lower()] = value def is_header_set(self, name): return name.lower() in self._headers def end_headers(self): for name, value in self._headers.items(): self._handler.send_header(name, value) self._handler.end_headers() def get_wfile(self): return self._handler.wfile def write(self, data): self._handler.wfile.write(data) class _RequestHandler(BaseHTTPRequestHandler, object): default_request_version = "HTTP/1.0" def __init__(self, http_svc, *args, **kwargs): self._service = http_svc BaseHTTPRequestHandler.__init__(self, *args, **kwargs) def __getattr__(self, name): if not name.startswith("do_"): return object.__getattribute__(self, name) parsed_path = self.path.split("?", 1)[0].replace("//", "/") found_servlet = self._service.get_servlet(parsed_path) if found_servlet is not None: servlet, _, prefix = found_servlet if hasattr(servlet, name): request = _HTTPServletRequest(self, prefix) response = _HTTPServletResponse(self) def wrapper(): try: return getattr(servlet, name)(request, response) except: return self.send_exception(response) return wrapper return self.send_no_servlet_response def log_error(self, message, *args, **kwargs): self._service.log(logging.ERROR, message, *args, **kwargs)
Apache License 2.0
mrod5/pyturb
src/pyturb/combustion/combustion_thermodynamics.py
Combustion.reactants_dictionary
python
def reactants_dictionary(self):
    return self._reactants_dictionary
Reactants dictionary [gas_species]: moles
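A hedged usage sketch; the species names, the PerfectIdealGas constructor call, and the assumption that the dictionary is populated once the stoichiometry has been computed elsewhere in the class are illustrative, not taken from this excerpt:

# Hedged sketch: species names and the population of the dictionary are assumptions.
from pyturb.gas_models.perfect_ideal_gas import PerfectIdealGas
from pyturb.combustion.combustion_thermodynamics import Combustion

fuel = PerfectIdealGas('CH4')
oxidizer = PerfectIdealGas('Air')
combustion = Combustion(fuel, oxidizer)

# Once the stoichiometry has been worked out (the method is not shown in this
# excerpt), the property exposes moles per reactant species:
for species, moles in combustion.reactants_dictionary.items():
    print(species, moles)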
https://github.com/mrod5/pyturb/blob/08b4016528fc50733fff58d967d1000bf1e634c9/src/pyturb/combustion/combustion_thermodynamics.py#L90-L94
from pyturb.gas_models.thermo_properties import ThermoProperties from pyturb.gas_models.perfect_ideal_gas import PerfectIdealGas from pyturb.gas_models.semiperfect_ideal_gas import SemiperfectIdealGas import numpy as np import warnings oxidizers = ['Air', 'O', 'O2', 'O3', 'O2(L)', 'O3(L)'] fuels = ['hydrocarbon', 'C8H18,isooctane', 'CH4', 'C2H6', 'C3H8', 'C4H10', 'C5H12', 'C6H14', 'C7H16', 'C8H18', 'CH4O', 'CH3OCH3', 'H2'] inert_gases = ['He', 'Ar', 'N2', 'CO2', 'CO'] class Combustion(object): def __init__(self, fuel, oxidizer): if not(isinstance(fuel, PerfectIdealGas) or isinstance(fuel, SemiperfectIdealGas) or isinstance(fuel, IdealLiquid)): raise TypeError("Object must be PerfectIdealGas, SemiperfectIdealGas or PerfectLiquid. Instead received {}".format(fluid)) if not(isinstance(fuel, PerfectIdealGas) or isinstance(fuel, SemiperfectIdealGas) or isinstance(fuel, IdealLiquid)): raise TypeError("Object must be PerfectIdealGas, SemiperfectIdealGas or PerfectLiquid. Instead received {}".format(fluid)) self.oxidizer_list = oxidizers self.fuel_list = fuels self.fuel = fuel self.oxidizer = oxidizer reactants_status = self._classify_reactants() if not reactants_status: raise ValueError("Unknown fuel and oxidizer") else: self._alpha = 0 self._beta = 0 self._gamma = 0 self._delta = 0 return @property def reactants(self): return self._reactants @property def products(self): return self._products @property
MIT License