| content | path | size | nl_text | nl_size | nl_language | nl_language_score |
| --- | --- | --- | --- | --- | --- | --- |
| stringlengths 27-928k | stringlengths 4-230 | int64 27-928k | stringlengths 21-396k | int64 21-396k | stringlengths 2-3 | float64 0.04-1 |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import re
from typing import Union
from jsonschema import RefResolver
from pydantic import BaseModel, Field
from .streams import DEFAULT_START_DATE, ReportGranularity
class OauthCredSpec(BaseModel):
class Config:
title = "OAuth2.0"
auth_type: str = Field(default="oauth2.0", const=True, order=0)
app_id: str = Field(title="App ID", description="The App ID applied by the developer.", airbyte_secret=True)
secret: str = Field(title="Secret", description="The private key of the developer's application.", airbyte_secret=True)
access_token: str = Field(title="Access Token", description="Long-term Authorized Access Token.", airbyte_secret=True)
class SandboxEnvSpec(BaseModel):
class Config:
title = "Sandbox Access Token"
auth_type: str = Field(default="sandbox_access_token", const=True, order=0)
# it is string because UI has the bug https://github.com/airbytehq/airbyte/issues/6875
advertiser_id: str = Field(
title="Advertiser ID", description="The Advertiser ID which generated for the developer's Sandbox application."
)
access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
class ProductionEnvSpec(BaseModel):
class Config:
title = "Production Access Token"
auth_type: str = Field(default="prod_access_token", const=True, order=0)
    # it is string because UI has the bug https://github.com/airbytehq/airbyte/issues/6875
app_id: str = Field(description="The App ID applied by the developer.", title="App ID")
secret: str = Field(title="Secret", description="The private key of the developer application.", airbyte_secret=True)
access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
class SourceTiktokMarketingSpec(BaseModel):
class Config:
title = "TikTok Marketing Source Spec"
start_date: str = Field(
title="Start Date",
default=DEFAULT_START_DATE,
pattern="^[0-9]{4}-[0-9]{2}-[0-9]{2}$",
description="The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. "
"If this parameter is not set, all data will be replicated.",
order=0,
)
report_granularity: str = Field(
title="Report Granularity",
description="Which time granularity should be grouped by; for LIFETIME there will be no grouping. "
"This option is used for reports' streams only.",
default=ReportGranularity.default().value,
enum=[g.value for g in ReportGranularity],
order=1,
)
credentials: Union[OauthCredSpec, ProductionEnvSpec, SandboxEnvSpec] = Field(
title="Authorization Method", order=3, default={}, type="object"
)
@classmethod
def change_format_to_oneOf(cls, schema: dict) -> dict:
new_schema = {}
for key, value in schema.items():
if isinstance(value, dict):
value = cls.change_format_to_oneOf(value)
if key == "anyOf":
new_schema["oneOf"] = value
else:
new_schema[key] = value
return new_schema
@staticmethod
def resolve_refs(schema: dict) -> dict:
json_schema_ref_resolver = RefResolver.from_schema(schema)
str_schema = json.dumps(schema)
for ref_block in re.findall(r'{"\$ref": "#\/definitions\/.+?(?="})"}', str_schema):
ref = json.loads(ref_block)["$ref"]
str_schema = str_schema.replace(ref_block, json.dumps(json_schema_ref_resolver.resolve(ref)[1]))
pyschema = json.loads(str_schema)
del pyschema["definitions"]
return pyschema
@classmethod
def schema(cls) -> dict:
"""we're overriding the schema classmethod to enable some post-processing"""
schema = super().schema()
schema = cls.change_format_to_oneOf(schema)
return cls.resolve_refs(schema)
class CompleteOauthOutputSpecification(BaseModel):
access_token: str = Field(path_in_connector_config=["credentials", "access_token"])
class CompleteOauthServerInputSpecification(BaseModel):
app_id: str = Field()
secret: str = Field()
class CompleteOauthServerOutputSpecification(BaseModel):
app_id: str = Field(path_in_connector_config=["credentials", "app_id"])
secret: str = Field(path_in_connector_config=["credentials", "secret"])
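# A minimal self-check sketch (illustrative only; this module cannot run
# standalone because of the relative ".streams" import above). The overridden
# schema() should have inlined every "$ref" and rewritten the credentials
# Union from "anyOf" to "oneOf":
#   schema = SourceTiktokMarketingSpec.schema()
#   assert "definitions" not in schema
#   assert "oneOf" in schema["properties"]["credentials"]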
| airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/spec.py | 4,541 | | 296 | en | 0.835223 |
from TASSELpy.java.lang.Number import Number, metaNumber
from TASSELpy.java.lang.Comparable import Comparable
from TASSELpy.utils.DocInherit import DocInherit
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload
from TASSELpy.javaObj import javaObj
from TASSELpy.utils.helper import make_sig
from abc import ABCMeta
import numpy as np
java_imports = {'Long':'java/lang/Long',
'String':'java/lang/String'}
class metaLong:
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if C == np.int64:
return True
elif C == np.uint64:
return True
elif issubclass(C,Long):
return True
elif issubclass(C,long):
return True
else:
return False
## Wrapper class for java.lang.Long
class Long(Comparable, Number):
"""
Wrapper class for java.lang.Long
"""
_java_name = java_imports['Long']
@javaConstructorOverload(java_imports['Long'],
(make_sig(['long'],'void'),(metaLong,)),
(make_sig([java_imports['String']],'void'),(str,)))
def __init__(self, *args, **kwargs):
"""
Instantiates a new Long
Signatures:
Long(long value)
Long(String s)
Arguments:
Long(long value)
value -- The long to wrap in the object
Long (String s)
s -- The string representing the long
"""
super(Long, self).__init__(*args, generic=(Long,), **kwargs)
@DocInherit
@javaOverload("compareTo",
(make_sig([java_imports['Long']],'int'),(metaLong,),None))
def compareTo(self, *args):
pass
###################################
## Numeric magic methods
###################################
def __pos__(self):
return Long(+self.toPrimative())
def __neg__(self):
return Long(-self.toPrimative())
def __abs__(self):
        return Long(abs(self.toPrimative()))
def __invert__(self):
return Long(~self.toPrimative())
    def __floor__(self):
        return Long(np.int64(np.floor(self.toPrimative())))
    def __ceil__(self):
        return Long(np.int64(np.ceil(self.toPrimative())))
###################################
## Arithmetic magic methods
###################################
def __add__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() + other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() + other))
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() - other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() - other))
def __rsub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative()-self.toPrimative()))
else:
return Long(np.int64(other-self.toPrimative()))
def __isub__(self, other):
return self.__sub__(other)
def __mul__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() * other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() * other))
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
return self.__mul__(other)
def __floordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() // other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() // other))
def __rfloordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() // self.toPrimative()))
else:
return Long(np.int64(other // self.toPrimative()))
def __ifloordiv__(self, other):
return self.__floordiv__(other)
def __div__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() / other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() / other))
def __rdiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() / self.toPrimative()))
else:
return Long(np.int64(other / self.toPrimative()))
def __idiv__(self, other):
return self.__div__(other)
def __mod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() % other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() % other))
def __rmod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() % self.toPrimative()))
else:
return Long(np.int64(other % self.toPrimative()))
def __imod__(self, other):
return self.__mod__(other)
def __pow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ** other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ** other))
def __rpow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() ** self.toPrimative()))
else:
return Long(np.int64(other ** self.toPrimative()))
def __ipow__(self, other):
return self.__pow__(other)
def __lshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() << other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() << other))
def __rlshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() << self.toPrimative()))
else:
return Long(np.int64(other << self.toPrimative()))
def __ilshift__(self, other):
return self.__lshift__(other)
def __rshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() >> other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() >> other))
    def __rrshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() >> self.toPrimative()))
else:
return Long(np.int64(other >> self.toPrimative()))
def __irshift__(self, other):
return self.__rshift__(other)
def __and__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() & other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() & other))
def __rand__(self, other):
return self.__and__(other)
def __iand__(self, other):
return self.__and__(other)
def __or__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() | other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() | other))
def __ror__(self, other):
return self.__or__(other)
def __ior__(self, other):
return self.__or__(other)
def __xor__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ^ other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ^ other))
def __rxor__(self, other):
return self.__xor__(other)
def __ixor__(self, other):
return self.__xor__(other)
def __repr__(self):
return "Long(%d)" % self.longValue()
@DocInherit
def toPrimative(self):
return self.longValue()
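# Minimal usage sketch (assumes a running TASSELpy JVM bridge; Python 2):
#   x = Long(5)
#   y = Long("3")
#   (x + y).longValue()   # 8: __add__ wraps np.int64 arithmetic in a new Long
#   x.compareTo(y)        # positive, delegated to java.lang.Long.compareTo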
| TASSELpy/java/lang/Long.py | 8,941 | | 314 | en | 0.281293 |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-t#@y8e6d21m2+#l#m00+pi&d0eyqa2a6v09hle&!6di(d4th*0'
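# A common hardening sketch for real deployments: read the key from the
# environment instead of committing it, e.g.
#   import os
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)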
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| profiles_project/settings.py | 3,389 | | 1,091 | en | 0.664137 |
import os
import re
import struct
import glob
import numpy as np
import frame_utils
import skimage
import skimage.io
import torch
from torch.utils.data import Dataset
class KLens(Dataset):
#def __init__(self,raft_path="/data2/opticalflow/rnd/opticalflow/RAFT/out_klens_raft_chairs", root_path="/data2/opticalflow/KLENS/images/",root_path2="/data2/opticalflow/KLENS/pins/",filenumberlist=["0030","1106","1113","1132","1134","1167","1173"],split="train",ref="",meas=""):
def __init__(self,raft_path="/data2/opticalflow/algo_comp/flownet2/out/", root_path="/data2/opticalflow/KLENS/images/",root_path2="/data2/opticalflow/KLENS/pins/",filenumberlist=["0030","1106","1113","1132","1134","1167","1173"],split="train",ref="",meas=""):
super(KLens, self).__init__()
self.split = split
raftflowpaths = glob.glob(os.path.join(raft_path,"*.flo"))
file_list = {}
file_list['train'] = []
file_list['valid'] = []
file_list['test'] = []
file_list['train+valid'] = []
for filenum in filenumberlist:
for raftflowpath in raftflowpaths:
#print(raftflowpath)
if "KLE_"+filenum in raftflowpath:
file_list['train'].append([os.path.join(root_path,"KLE_"+filenum+".jpg3.png"),os.path.join(root_path,"KLE_"+filenum+".jpg5.png"),raftflowpath])
file_list["train"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
file_list["valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
file_list["test"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
file_list["train+valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_3.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_3.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_3.jpg")],])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_0.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_0.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_0.jpg")],])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_1.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_1.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_1.jpg")],])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_4.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_4.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_4.jpg")],])
self.dataset = file_list
def __len__(self):
return len(self.dataset[self.split])
def __getitem__(self, idx):
try:
im0_path, im1_path, raftflow_path = self.dataset[self.split][idx]
raftflow = frame_utils.readFlow(raftflow_path)
        except ValueError:  # entry has no RAFT flow path for this sample
im0_path, im1_path = self.dataset[self.split][idx]
raftflow = np.array([])
img0 = skimage.io.imread(im0_path)
img1 = skimage.io.imread(im1_path)
img0 = torch.tensor(img0/255.).float()
img1 = torch.tensor(img1/255.).float()
return img0, img1,np.array([]),np.array([]), [im0_path , im1_path],raftflow
class Flo:
def __init__(self, w, h):
self.__floec1__ = float(202021.25)
self.__floec2__ = int(w)
self.__floec3__ = int(h)
self.__floheader__ = struct.pack('fii', self.__floec1__, self.__floec2__, self.__floec3__)
self.__floheaderlen__ = len(self.__floheader__)
self.__flow__ = w
self.__floh__ = h
self.__floshape__ = [self.__floh__, self.__flow__, 2]
if self.__floheader__[:4] != b'PIEH':
raise Exception('Expect machine to be LE.')
def load(self, file):
with open(file, 'rb') as fp:
if fp.read(self.__floheaderlen__) != self.__floheader__:
raise Exception('Bad flow header: ' + file)
result = np.ndarray(shape=self.__floshape__,
dtype=np.float32,
buffer=fp.read(),
order='C')
return result
def save(self, arr, fname):
with open(fname, 'wb') as fp:
fp.write(self.__floheader__)
fp.write(arr.astype(np.float32).tobytes())
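# Minimal usage sketch (the dataset/RAFT paths above are machine-specific
# assumptions; adjust them before running):
#   ds = KLens(split="test")
#   img0, img1, _, _, paths, raftflow = ds[0]
#   flo = Flo(w=640, h=480)                   # width/height are assumptions
#   arr = np.zeros((480, 640, 2), np.float32)
#   flo.save(arr, "out.flo")                  # round-trips with flo.load("out.flo")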
| data_loaders/KLens.py | 7,492 | | 1,461 | en | 0.344014 |
# Telegram settings
TG_CLI = '/opt/tg/bin/telegram-cli'
TG_PUBKEY = '/opt/tg/tg-server.pub'
RECEPIENT = '@your-tg-recepient'
# Reddit App settings
REDDIT_APP_KEY = 'c...w'
REDDIT_APP_SECRET = 'T...c'
REDDIT_USER_AGENT = ('Damaris Bot, v0.1. Read-only bot to read posts from '
                     '/r/cats')
# Sample Captions
CAPTIONS = [
"some",
"random",
"strings",
]
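# Sketch of how these settings are typically consumed on the Reddit side
# (assuming the praw library):
#   import praw
#   reddit = praw.Reddit(client_id=REDDIT_APP_KEY,
#                        client_secret=REDDIT_APP_SECRET,
#                        user_agent=REDDIT_USER_AGENT)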
| sample_settings.py | 383 | | 53 | en | 0.707184 |
import os
import pickle
import numpy as np
from tqdm import tqdm
from deeptutor.envs.DashEnv import *
from deeptutor.envs.EFCEnv import EFCEnv
from deeptutor.envs.HRLEnv import *
from deeptutor.infrastructure.utils import *
from deeptutor.tutors.LeitnerTutor import LeitnerTutor
from deeptutor.tutors.RandTutor import RandTutor
from deeptutor.tutors.PPOTutor import PPOTutor
from deeptutor.tutors.SACTutor import SACTutor
from deeptutor.tutors.DQNTutor import DQNTutor
from deeptutor.tutors.MLPTRPOTutor import MLPTRPOTutor
from deeptutor.tutors.GRUTRPOTutor import GRUTRPOTutor
from deeptutor.tutors.SuperMnemoTutor import SuperMnemoTutor
from deeptutor.tutors.ThresholdTutor import ThresholdTutor
def load_rewards(tutor_name, data_dir):
filename = os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl")
if not os.path.exists(filename):
return {}
with open(filename, "rb") as f:
return pickle.load(f)["rewards"]
def main():
override = True # override existing data
data_dir = os.path.join(os.getcwd(), "data")
n_steps = 200
n_items = 30
const_delay = 5
discount = 0.99
n_reps = 10
n_eps = 100
env_kwargs = {
"n_items": n_items,
"n_steps": n_steps,
"discount": discount,
"sample_delay": sample_const_delay(const_delay),
}
reward_funcs = [
"likelihood",
"log_likelihood"
]
envs = [
("EFC", EFCEnv),
("HLR", HLREnv),
("DASH", DASHEnv)
]
tutor_builders = [
# ("Random", RandTutor),
# ("Leitner", LeitnerTutor),
# ("SuperMnemo", SuperMnemoTutor),
# ("Threshold", ThresholdTutor),
# ("MLPTRPO", MLPTRPOTutor),
# ("GRUTRPO", GRUTRPOTutor),
# ("PPO", PPOTutor),
("DQN", DQNTutor),
]
rl_tutors = [MLPTRPOTutor, GRUTRPOTutor, PPOTutor, DQNTutor]
reward_logs = {
"n_steps": n_steps,
"n_items": n_items,
"discount": discount,
"const_delay": const_delay,
"n_reps": n_reps,
"n_eps": n_eps,
"reward_funcs": reward_funcs,
}
    for tutor_name, build_tutor in tutor_builders:
print(f"Training {tutor_name}")
rewards = load_rewards(tutor_name, data_dir)
        for base_env_name, base_env in envs:
            for reward_func in reward_funcs:
env_name = (
base_env_name + "-" + ("L" if reward_func == "likelihood" else "LL")
)
print(f"Environment: {env_name}")
if env_name in rewards.keys() and not override:
print("Skipping\n")
continue
R = np.zeros((n_eps, n_reps))
for j in tqdm(range(n_reps)):
np.random.seed(j)
env = base_env(**env_kwargs, reward_func=reward_func)
if build_tutor in rl_tutors:
rl_env = make_rl_student_env(env)
agent = build_tutor(n_items)
R[:, j] = agent.train(rl_env, n_eps=n_eps, seed=j)
else:
if "Thresh" in tutor_name:
agent = build_tutor(n_items, env=env)
else:
agent = build_tutor(n_items)
R[:, j] = agent.train(env, n_eps=n_eps)
rewards[env_name] = R
reward_logs["rewards"] = rewards
with open(os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl"), "wb") as f:
pickle.dump(reward_logs, f, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
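# Inspection sketch (assumes a prior run saved DQN_reward_logs.pkl as above):
#   rewards = load_rewards("DQN", os.path.join(os.getcwd(), "data"))
#   for env_name, R in rewards.items():
#       print(env_name, R.mean(axis=1)[-1])  # mean final-episode reward over reps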
| deeptutor/scripts/run.py | 3,743 | | 209 | en | 0.301484 |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDecorator(PythonPackage):
"""The aim of the decorator module it to simplify the usage of decorators
for the average programmer, and to popularize decorators by showing
various non-trivial examples."""
homepage = "https://github.com/micheles/decorator"
url = "https://pypi.io/packages/source/d/decorator/decorator-4.4.0.tar.gz"
version('4.4.0', sha256='86156361c50488b84a3f148056ea716ca587df2f0de1d34750d35c21312725de')
version('4.3.0', sha256='c39efa13fbdeb4506c476c9b3babf6a718da943dab7811c206005a4a956c080c')
version('4.0.9', sha256='90022e83316363788a55352fe39cfbed357aa3a71d90e5f2803a35471de4bba8')
depends_on('python@2.6:2.8,3.2:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
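# Typical usage sketch: `spack install py-decorator@4.4.0`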
| var/spack/repos/builtin/packages/py-decorator/package.py | 984 | | 359 | en | 0.775344 |
"""Support for Aqualink pool lights."""
from iaqualink import AqualinkLightEffect
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_EFFECT,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import AqualinkEntity, refresh_system
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered lights."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkLight(dev))
async_add_entities(devs, True)
class HassAqualinkLight(AqualinkEntity, LightEntity):
"""Representation of a light."""
@property
def name(self) -> str:
"""Return the name of the light."""
return self.dev.label
@property
def is_on(self) -> bool:
"""Return whether the light is on or off."""
return self.dev.is_on
@refresh_system
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the light.
This handles brightness and light effects for lights that do support
them.
"""
brightness = kwargs.get(ATTR_BRIGHTNESS)
effect = kwargs.get(ATTR_EFFECT)
# For now I'm assuming lights support either effects or brightness.
if effect:
effect = AqualinkLightEffect[effect].value
await self.dev.set_effect(effect)
elif brightness:
# Aqualink supports percentages in 25% increments.
pct = int(round(brightness * 4.0 / 255)) * 25
await self.dev.set_brightness(pct)
else:
await self.dev.turn_on()
@refresh_system
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the light."""
await self.dev.turn_off()
@property
def brightness(self) -> int:
"""Return current brightness of the light.
The scale needs converting between 0-100 and 0-255.
"""
        return int(self.dev.brightness * 255 / 100)
@property
def effect(self) -> str:
"""Return the current light effect if supported."""
return AqualinkLightEffect(self.dev.effect).name
@property
def effect_list(self) -> list:
"""Return supported light effects."""
return list(AqualinkLightEffect.__members__)
@property
def supported_features(self) -> int:
"""Return the list of features supported by the light."""
if self.dev.is_dimmer:
return SUPPORT_BRIGHTNESS
if self.dev.is_color:
return SUPPORT_EFFECT
return 0
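# Brightness conversion sketch: Home Assistant uses 0-255, Aqualink 0-100 in
# 25% steps, so async_turn_on maps 255 -> 100, 128 -> 50, 64 -> 25, while the
# brightness property maps 75 (%) back to int(75 * 255 / 100) == 191.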
| homeassistant/components/iaqualink/light.py | 2,766 | | 469 | en | 0.881435 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import argparse
import cld2
import langid
import sys
""" Removes some wrongly aligned pairs from hunalign output """
class LanguageIdentifier(object):
def __init__(self, use_cld2, valid_languages=None):
self.use_cld2 = use_cld2
self.valid_languages = [l.lower() for l in valid_languages]
if not use_cld2 and valid_languages:
langid.set_languages(self.valid_languages)
def is_language(self, s, expected_lang):
""" Check if the language of the segment cannot be reliably identified
as another language. If another than the expected language is
detected return False """
expected_lang = expected_lang.lower()
if self.valid_languages:
assert expected_lang in self.valid_languages
if self.use_cld2:
reliable, _text_bytes, details = cld2.detect(
s.encode("utf-8"),
isPlainText=True,
useFullLangTables=True,
bestEffort=True)
if reliable:
for _lang, langcode, confidence, score in details:
if langcode == expected_lang and confidence >= 10:
return True
return False
else: # unreliable is still counted as OK
return True
else:
            lang, confidence = langid.classify(s.lower())
if lang != expected_lang and confidence > 0.9:
# confidence for wrong language higher than 90%
return False
else:
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('-deleted', help='file to keep deleted lines',
type=argparse.FileType('w'))
parser.add_argument('-minscore', type=float, default=0,
help='minimum score from hunalign')
parser.add_argument('-slang', '--lang1', help='source language',
dest='source_lang', default='en')
parser.add_argument('-tlang', '--lang2', help='target language',
dest='target_lang', default='fr')
parser.add_argument('-cld2', help='use CLD2 instead of langid.py',
action='store_true')
args = parser.parse_args()
deletions = defaultdict(list)
n_written = 0
n_total = 0
lid = LanguageIdentifier(args.cld2, [args.source_lang, args.target_lang])
for line in args.infile:
n_total += 1
score = 1.0
split_line = line.rstrip('\n').split("\t")
if len(split_line) == 5:
split_line = split_line[-3:]
if len(split_line) == 3:
source, target, score = split_line
else:
assert len(split_line) == 2
source, target = split_line
source = source.decode('utf-8', 'ignore')
target = target.decode('utf-8', 'ignore')
if source == target:
deletions["identical"].append(target)
continue
if not source.strip():
deletions["source_empty"].append('')
continue
elif not target.strip():
deletions["target_empty"].append('')
continue
if float(score) < args.minscore:
deletions["low score"].append("\t".join((source, target, score)))
continue
if float((len(source) + 15)) / float(len(target) + 15) > 1.5:
deletions["source_too_long"].append("%s\t%s" % (source, target))
continue
if float((len(target) + 15)) / float(len(source) + 15) > 1.5:
deletions["source_too_short"].append("%s\t%s" % (source, target))
continue
if not lid.is_language(source, args.source_lang):
deletions["source_lang"].append(source)
continue
if not lid.is_language(target, args.target_lang):
deletions["target_lang"].append(target)
continue
args.outfile.write(line)
n_written += 1
if args.deleted:
args.deleted.write("Written: %d of %d = %f percent\n" %
(n_written, n_total,
100. * n_written / max((1, n_total))))
for reason, deleted in deletions.iteritems():
args.deleted.write("Deleted %d items due to %s\n"
% (len(deleted), reason))
for line in deleted:
if line.strip():
args.deleted.write("\t%s\n" % line.encode('utf-8'))
| baseline/filter_hunalign_bitext.py | 4,854 | | 275 | en | 0.925444 |
"""Define abstract base classes to construct FileFinder classes."""
import os
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Sequence, Union
import mne_bids
@dataclass
class FileFinder(ABC):
"""Basic representation of class for finding and filtering files."""
hemispheres: Union[dict, None] = field(default_factory=dict)
directory: Union[Path, str] = field(init=False)
files: list = field(init=False, default_factory=list)
def __str__(self):
if not self.files:
return "No corresponding files found."
headers = ["Index", "Filename"]
col_width = max(len(os.path.basename(file)) for file in self.files)
format_row = f"{{:>{len(headers[0]) + 2}}}{{:>{col_width + 2}}}"
terminal_size = "\u2500" * shutil.get_terminal_size().columns
return "\n".join(
(
"Corresponding files found:",
"".join(
f"{{:>{len(header) + 2}}}".format(header)
for header in headers
),
terminal_size,
*(
format_row.format(idx, os.path.basename(file))
for idx, file in enumerate(self.files)
),
)
)
def __len__(self) -> int:
if not self.files:
return 0
return len(self.files)
@abstractmethod
def find_files(
self,
directory: Union[str, Path],
extensions: Optional[Union[Sequence, str]] = None,
keywords: Optional[Union[list, str]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Find files in directory with optional
keywords and extensions."""
@abstractmethod
def filter_files(
self,
keywords: Optional[Union[str, list]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Filter list of filepaths for given parameters."""
@staticmethod
def _keyword_search(
files: list[str], keywords: Optional[Union[str, list]]
) -> list:
if not keywords:
return files
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = [
file for file in files if any(key in file for key in keywords)
]
return filtered_files
def _find_files(
self,
directory: Union[Path, str],
extensions: Optional[Union[list, str]] = None,
) -> None:
"""Find files in directory with optional extensions.
Args:
directory (string)
keywords (list): e.g. ["SelfpacedRota", "ButtonPress] (optional)
extensions (list): e.g. [".json" or "tsv"] (optional)
verbose (bool): verbosity level (optional, default=True)
"""
files = []
for root, _, fnames in os.walk(directory):
fnames = [os.path.join(root, file) for file in fnames]
fnames = self._keyword_search(fnames, extensions)
if fnames:
files.extend(fnames)
self.files = files
def _filter_files(
self,
keywords: Optional[Union[str, list[str]]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list[str]]] = None,
) -> None:
"""Filter filepaths for given parameters."""
filtered_files = self.files
if exclude:
if not isinstance(exclude, list):
exclude = [exclude]
filtered_files = [
file
for file in filtered_files
if not any(item in file for item in exclude)
]
if keywords:
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = self._keyword_search(filtered_files, keywords)
if stimulation:
if stimulation.lower() in "stimon":
stim = "StimOn"
elif stimulation.lower() in "stimoff":
stim = "StimOff"
else:
raise ValueError("Keyword for stimulation not valid.")
filtered_files = self._keyword_search(filtered_files, [stim])
if medication:
if medication.lower() in "medon":
med = "MedOn"
elif medication.lower() in "medoff":
med = "MedOff"
else:
raise ValueError("Keyword for medication not valid.")
filtered_files = self._keyword_search(filtered_files, [med])
if hemisphere:
matching_files = []
for file in filtered_files:
subject = mne_bids.get_entities_from_fname(file)["subject"]
if (
subject not in self.hemispheres
or self.hemispheres[subject] is None
):
raise HemisphereNotSpecifiedError(
subject, self.hemispheres
)
hem = self.hemispheres[subject] + "_"
if hemisphere.lower() in "ipsilateral" and hem in file:
matching_files.append(file)
if hemisphere.lower() in "contralateral" and hem not in file:
matching_files.append(file)
filtered_files = matching_files
self.files = filtered_files
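# Minimal concrete subclass sketch (hypothetical), wiring the protected
# helpers into the abstract interface:
#   @dataclass
#   class DefaultFileFinder(FileFinder):
#       def find_files(self, directory, extensions=None, keywords=None,
#                      hemisphere=None, stimulation=None, medication=None,
#                      exclude=None, verbose=False):
#           self.directory = Path(directory)
#           self._find_files(self.directory, extensions)
#           self._filter_files(keywords, hemisphere, stimulation,
#                              medication, exclude)
#       def filter_files(self, **kwargs):
#           self._filter_files(**kwargs)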
class DirectoryNotFoundError(Exception):
"""Exception raised when invalid Reader is passed.
Attributes:
directory -- input directory which caused the error
"""
def __init__(
self,
directory: Union[Path, str],
message="Input directory was not found.",
):
self.directory = directory
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message} Got: {self.directory}."
class HemisphereNotSpecifiedError(Exception):
"""Exception raised when electrode hemisphere is not specified in settings.
Attributes:
subject -- input subject which caused the error
hemisphere -- specified hemispheres
message -- explanation of the error
"""
def __init__(
self,
subject,
hemispheres,
message=(
"Input ECOG hemisphere is not specified in"
" `filefinder_settings.py` for given subject."
),
) -> None:
self.subject = subject
self.hemispheres = hemispheres
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{self.message} Unspecified subject: {self.subject}."
f" Specified hemispheres: {self.hemispheres}."
)
| src/pte/filetools/filefinder_abc.py | 7,223 | | 876 | en | 0.568423 |
__all__ = ("group_attempts", "fails_filter", "reduce_to_failures",)
def group_attempts(sequence, filter_func=None):
if filter_func is None:
filter_func = lambda x:True
last, l = None, []
for x in sequence:
if isinstance(x, tuple) and x[0] == 'inspecting':
if l:
yield last, l
last, l = x[1], []
elif last is not None:
if filter_func(x):
# inline ignored frames
if getattr(x, 'ignored', False):
l.extend(y for y in x.events if filter_func(y))
else:
l.append(x)
if l:
yield last, l
def fails_filter(x):
if not isinstance(x, tuple):
return not x.succeeded
if x[0] == "viable":
return not x[1]
return x[0] != "inspecting"
def reduce_to_failures(frame):
if frame.succeeded:
return []
l = [frame]
for pkg, nodes in group_attempts(frame.events, fails_filter):
l2 = []
for x in nodes:
if not isinstance(x, tuple):
l2.append(reduce_to_failures(x))
else:
l2.append(x)
l.append((pkg, l2))
return l
| src/pkgcore/resolver/util.py | 1,211 | | 21 | en | 0.389758 |
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Dinner problem in Z3
#
# From http://www.sellsbrothers.com/spout/#The_Logic_of_Logic
# """
# My son came to me the other day and said, "Dad, I need help with a
# math problem." The problem went like this:
#
# * We're going out to dinner taking 1-6 grandparents, 1-10 parents and/or 1-40 children
# * Grandparents cost $3 for dinner, parents $2 and children $0.50
# * There must be 20 total people at dinner and it must cost $20
# * How many grandparents, parents and children are going to dinner?
# """
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
n = 3
# variables
# x = makeIntVector(sol, "x", 3, 1, 100)
# x = makeRealVector(sol, "x", 3, 1, 100)
# Grandparents, Parents, Children = x
Grandparents = makeRealVar(sol,"Grandparents", 1,6)
Parents = makeRealVar(sol,"Parents", 1,10)
Children = makeRealVar(sol,"Children", 1,40)
# constraints
#
# sol.add(3*Grandparents + 2*Parents + Children/2 == 20)
# multiply by 2:
sol.add(Grandparents * 6 + Parents * 4 + Children * 1 == 40)
# Grandparents + Parents + Children == 20 (headcount constraint, disabled)
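# To also enforce the puzzle's 20-person requirement, one would add:
#   sol.add(Grandparents + Parents + Children == 20)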
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print([mod.eval(x) for x in [Grandparents,Parents,Children]])
getDifferentSolution(sol,mod,[Grandparents,Parents,Children])
if num_solutions > 5:
        break
print('num_solutions:', num_solutions)
| z3/dinner.py | 1,511 | | 895 | en | 0.88553 |
# -----------------------------------------------------------------------------
# Libraries
# -----------------------------------------------------------------------------
# Core libs
from typing import TYPE_CHECKING
# Third party libs
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
# Project libs
from apps.users.models import ClientAddress
from apps.users.serializers.client_address import (
ClientAddressCreateSerializer,
ClientAddressRetrieveSerializer,
)
# If type checking, __all__
if TYPE_CHECKING:
pass
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class ClientAddressCreateListView(ListCreateAPIView):
queryset = ClientAddress.objects.all()
serializer_class = ClientAddressCreateSerializer
class ClientAddressRetrieveUpdateView(RetrieveUpdateDestroyAPIView):
queryset = ClientAddress.objects.all()
serializer_class = ClientAddressRetrieveSerializer
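# Typical URL wiring sketch (route names hypothetical):
#   from django.urls import path
#   urlpatterns = [
#       path("client-addresses/", ClientAddressCreateListView.as_view()),
#       path("client-addresses/<int:pk>/",
#            ClientAddressRetrieveUpdateView.as_view()),
#   ]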
| src/apps/users/views/rest/client_address.py | 1,401 | | 727 | en | 0.164674 |
import pyasdf
import numpy as np
import scipy.fftpack
import matplotlib.pyplot as plt
'''
This script takes a chunk of noise spectra for a station pair and compares
their cross-correlation functions computed with two schemes: one averages
the windows in the frequency domain and the other in the time domain.
'''
def cross_correlation1(fft1,fft2,maxlag,dt,Nfft):
    #----correlate each window, IFFT, then average in the time domain----
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros((fft1.shape[0],Nfft),dtype=np.complex64)
ncorr[:,:Nfft//2] = corr[:,:]
ncorr[:,-(Nfft//2)+1:]=np.flip(np.conj(ncorr[:,1:(Nfft//2)]),axis=1)
ncorr[:,0]=complex(0,0)
ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=1)))
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[:,ind]
ncorr = np.mean(ncorr,axis=0)
return ncorr
def cross_correlation2(fft1,fft2,maxlag,dt,Nfft):
    #----average the cross-spectra in the frequency domain, then IFFT----
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
ncorr[:Nfft//2] = np.mean(corr,axis=0)
ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
ncorr[0]=complex(0,0)
ncorr = np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0))
print(ncorr.real,ncorr.imag)
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[ind]
return ncorr
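# By linearity of the inverse FFT, averaging the per-window cross-spectra
# before the IFFT (cross_correlation2) should match averaging the per-window
# correlation functions after the IFFT (cross_correlation1), up to round-off.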
#-----common parameters------
iday = '2010_01_10'
icomp = 'EHZ'
dt = 0.05
maxlag = 800
sfile1 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.AC2H.h5'
sfile2 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.CHHH.h5'
#-----------reading the data------------
ds1 = pyasdf.ASDFDataSet(sfile1,mode='r')
ds2 = pyasdf.ASDFDataSet(sfile2,mode='r')
spect1 = ds1.auxiliary_data[icomp][iday].data[:]
spect2 = ds2.auxiliary_data[icomp][iday].data[:]
std1 = ds1.auxiliary_data[icomp][iday].parameters['std']
std2 = ds2.auxiliary_data[icomp][iday].parameters['std']
nwin = spect1.shape[0]
nfft = spect1.shape[1]*2
print('data dimension for spect1 and spect2 are %d and %d' % (spect1.ndim,spect2.ndim))
#------select the sections-------
indx1 = np.where(std1<10)[0]
indx2 = np.where(std2<10)[0]
bb=np.intersect1d(indx1,indx2)
print(spect1[bb,:],spect2[bb,:])
corr1=cross_correlation1(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
corr2=cross_correlation2(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
#---plotting----
plt.subplot(311)
plt.plot(corr1)
plt.subplot(312)
plt.plot(corr2)
plt.subplot(313)
plt.plot(corr2)
plt.plot(corr1)
plt.show()
| test/data_check/check_linearity_fft.py | 2,775 | | 224 | en | 0.491519 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorRealIFFTNMixin, validate_fftn, TensorRealFFTN
class TensorIRFFT2(TensorRealFFTN, TensorRealIFFTNMixin):
_op_type_ = OperandDef.IRFFT2
def __init__(self, shape=None, axes=None, norm=None, **kw):
super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input tensor
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
Normalization mode (see `mt.fft`). Default is None.
Returns
-------
out : Tensor
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
if len(axes) != 2:
raise ValueError("axes length should be 2")
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_))
return op(a)
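# Round-trip sketch (assumes the numpy-mirroring mars API and deferred
# execution via .execute()):
#   import mars.tensor as mt
#   a = mt.arange(25.).reshape(5, 5)
#   b = irfft2(mt.fft.rfft2(a), s=(5, 5))
#   # b.execute() is numerically close to a.execute()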
| mars/tensor/fft/irfft2.py | 2,089 | | 1,252 | en | 0.742303 |
"""WebPush Style Autopush Router
This router handles notifications that should be dispatched to an Autopush
node, or stores each individual message, along with its data, in a Message
table for retrieval by the client.
"""
import json
import time
from StringIO import StringIO
from typing import Any # noqa
from botocore.exceptions import ClientError
from twisted.internet.threads import deferToThread
from twisted.web.client import FileBodyProducer
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
CancelledError,
)
from twisted.internet.error import (
ConnectError,
ConnectionClosed,
ConnectionRefusedError,
)
from twisted.logger import Logger
from twisted.web._newclient import ResponseFailed
from twisted.web.http import PotentialDataLoss
from autopush.exceptions import ItemNotFound, RouterException
from autopush.metrics import make_tags
from autopush.protocol import IgnoreBody
from autopush.router.interface import RouterResponse
from autopush.types import JSONDict # noqa
TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2"
class WebPushRouter(object):
"""Implements :class: `autopush.router.interface.IRouter` for internal
routing to an autopush node
"""
log = Logger()
def __init__(self, conf, router_conf, db, agent):
"""Create a new Router"""
self.conf = conf
self.router_conf = router_conf
self.db = db
self.agent = agent
@property
def metrics(self):
return self.db.metrics
def register(self, uaid, router_data, app_id, *args, **kwargs):
# type: (str, JSONDict, str, *Any, **Any) -> None
"""No additional routing data"""
def amend_endpoint_response(self, response, router_data):
# type: (JSONDict, JSONDict) -> None
"""Stubbed out for this router"""
@inlineCallbacks
def route_notification(self, notification, uaid_data):
"""Route a notification to an internal node, and store it if the node
can't deliver immediately or is no longer a valid node
"""
# Determine if they're connected at the moment
node_id = uaid_data.get("node_id")
uaid = uaid_data["uaid"]
router = self.db.router
# Node_id is present, attempt delivery.
# - Send Notification to node
# - Success: Done, return 200
# - Error (Node busy): Jump to Save notification below
# - Error (Client gone, node gone/dead): Clear node entry for user
# - Both: Done, return 503
if node_id:
result = None
try:
result = yield self._send_notification(uaid, node_id,
notification)
except (ConnectError, ConnectionClosed, ResponseFailed,
CancelledError, PotentialDataLoss) as exc:
self.metrics.increment("updates.client.host_gone")
yield deferToThread(router.clear_node,
uaid_data).addErrback(self._eat_db_err)
if isinstance(exc, ConnectionRefusedError):
# Occurs if an IP record is now used by some other node
                    # in AWS or if the connection times out.
self.log.debug("Could not route message: {exc}", exc=exc)
if result and result.code == 200:
returnValue(self.delivered_response(notification))
# Save notification, node is not present or busy
# - Save notification
# - Success (older version): Done, return 202
# - Error (db error): Done, return 503
try:
yield self._save_notification(uaid_data, notification)
except ClientError as e:
log_exception = (e.response["Error"]["Code"] !=
"ProvisionedThroughputExceededException")
raise RouterException("Error saving to database",
status_code=503,
response_body="Retry Request",
log_exception=log_exception,
errno=201)
# - Lookup client again to get latest node state after save.
# - Success (node found): Notify node of new notification
# - Success: Done, return 200
# - Error (no client): Done, return 202
# - Error (no node): Clear node entry
# - Both: Done, return 202
# - Success (no node): Done, return 202
# - Error (db error): Done, return 202
# - Error (no client) : Done, return 404
try:
uaid_data = yield deferToThread(router.get_uaid, uaid)
except ClientError:
returnValue(self.stored_response(notification))
except ItemNotFound:
self.metrics.increment("updates.client.deleted")
raise RouterException("User was deleted",
status_code=410,
response_body="Invalid UAID",
log_exception=False,
errno=105)
# Verify there's a node_id in here, if not we're done
node_id = uaid_data.get("node_id")
if not node_id:
returnValue(self.stored_response(notification))
try:
result = yield self._send_notification_check(uaid, node_id)
except (ConnectError, ConnectionClosed, ResponseFailed) as exc:
self.metrics.increment("updates.client.host_gone")
if isinstance(exc, ConnectionRefusedError):
self.log.debug("Could not route message: {exc}", exc=exc)
yield deferToThread(
router.clear_node,
uaid_data).addErrback(self._eat_db_err)
returnValue(self.stored_response(notification))
if result.code == 200:
returnValue(self.delivered_response(notification))
else:
ret_val = self.stored_response(notification)
returnValue(ret_val)
def delivered_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
                               tags=make_tags(destination='Direct'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl or 0},
logged_status=200)
def stored_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
                               tags=make_tags(destination='Stored'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl},
logged_status=202)
#############################################################
# Blocking Helper Functions
#############################################################
def _send_notification(self, uaid, node_id, notification):
"""Send a notification to a specific node_id
        This version of the overridden method includes the necessary crypto
headers for the notification.
:type notification: autopush.utils.WebPushNotification
"""
payload = notification.serialize()
payload["timestamp"] = int(time.time())
url = node_id + "/push/" + uaid
request = self.agent.request(
"PUT",
url.encode("utf8"),
bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))),
)
request.addCallback(IgnoreBody.ignore)
return request
def _send_notification_check(self, uaid, node_id):
"""Send a command to the node to check for notifications"""
url = node_id + "/notif/" + uaid
return self.agent.request(
"PUT",
url.encode("utf8"),
).addCallback(IgnoreBody.ignore)
def _save_notification(self, uaid_data, notification):
"""Saves a notification, returns a deferred.
This version of the overridden method saves each individual message
to the message table along with relevant request headers if
available.
:type uaid_data: dict
"""
month_table = uaid_data["current_month"]
if notification.ttl is None:
            # Note: this URL (and this warning) is temporary; requests
            # missing a TTL will eventually be rejected with a 400.
raise RouterException(
"Missing TTL Header",
response_body="Missing TTL Header, see: %s" % TTL_URL,
status_code=400,
errno=111,
log_exception=False,
)
if notification.ttl == 0:
location = "%s/m/%s" % (self.conf.endpoint_url,
notification.version)
raise RouterException("Finished Routing", status_code=201,
log_exception=False,
headers={"TTL": str(notification.ttl),
"Location": location},
logged_status=204)
return deferToThread(
self.db.message_table(month_table).store_message,
notification=notification,
)
#############################################################
# Error Callbacks
#############################################################
def _eat_db_err(self, fail):
"""errBack for ignoring provisioned throughput errors"""
fail.trap(ClientError)
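#############################################################
# Wiring sketch (illustrative only; `conf`, `router_conf`, `db`,
# `notification` and `uaid_data` are hypothetical stand-ins for the
# real autopush objects, not part of this module):
#
#     from twisted.internet import reactor
#     from twisted.web.client import Agent
#
#     router = WebPushRouter(conf, router_conf, db, Agent(reactor))
#     d = router.route_notification(notification, uaid_data)
#     # The Deferred fires with a RouterResponse whose logged_status is
#     # 200 when delivered to a live node and 202 when stored instead.
#############################################################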
| autopush/router/webpush.py | 10,102 | Implements :class: `autopush.router.interface.IRouter` for internal
routing to an autopush node
Create a new Router
errBack for ignoring provisioned throughput errors
Saves a notification, returns a deferred.
This version of the overridden method saves each individual message
to the message table along with relevant request headers if
available.
:type uaid_data: dict
Send a notification to a specific node_id
This version of the overriden method includes the necessary crypto
headers for the notification.
:type notification: autopush.utils.WebPushNotification
Send a command to the node to check for notifications
Stubbed out for this router
No additional routing data
Route a notification to an internal node, and store it if the node
can't deliver immediately or is no longer a valid node
WebPush Style Autopush Router
This router handles notifications that should be dispatched to an Autopush
node, or stores each individual message, along with its data, in a Message
table for retrieval by the client.
noqa noqa type: (str, JSONDict, str, *Any, **Any) -> None type: (JSONDict, JSONDict) -> None Determine if they're connected at the moment Node_id is present, attempt delivery. - Send Notification to node - Success: Done, return 200 - Error (Node busy): Jump to Save notification below - Error (Client gone, node gone/dead): Clear node entry for user - Both: Done, return 503 Occurs if an IP record is now used by some other node in AWS or if the connection timesout. Save notification, node is not present or busy - Save notification - Success (older version): Done, return 202 - Error (db error): Done, return 503 - Lookup client again to get latest node state after save. - Success (node found): Notify node of new notification - Success: Done, return 200 - Error (no client): Done, return 202 - Error (no node): Clear node entry - Both: Done, return 202 - Success (no node): Done, return 202 - Error (db error): Done, return 202 - Error (no client) : Done, return 404 Verify there's a node_id in here, if not we're done Blocking Helper Functions Note that this URL is temporary, as well as this warning as we will 400 all missing TTL's eventually Error Callbacks | 2,262 | en | 0.770117 |
import json
import os
import redis
def json2redis(data, r):
    # accept either a single record dict or a list of record dicts
    rows = data if isinstance(data, list) else [data]
    for row in rows:
        channel = row['channel']
        data_type = row['data_type']
        rkey = 'channel_{}_{}'.format(channel, data_type)
        # redis stores strings/bytes, so serialize each record to JSON
        r.lpush(rkey, json.dumps(row))
# initialize redis connection for local and CF deployment
def connect_redis_db(redis_service_name = None):
if os.getenv('NODE_ENV') == 'micropcf':
DB_HOST = os.getenv('REDIS_HOST')
DB_PORT = os.getenv('REDIS_PORT')
DB_PW = os.getenv('REDIS_PASSWORD')
REDIS_DB = 0
elif os.environ.get('VCAP_SERVICES') is None: # running locally
DB_HOST = 'localhost'
DB_PORT = 6379
DB_PW = ''
REDIS_DB = 1
else: # running on CF
env_vars = os.environ['VCAP_SERVICES']
rediscloud_service = json.loads(env_vars)[redis_service_name][0]
credentials = rediscloud_service['credentials']
DB_HOST = credentials['host']
DB_PORT = credentials['port']
        DB_PW = credentials['password']
REDIS_DB = 0
return redis.StrictRedis(host=DB_HOST,
port=DB_PORT,
password=DB_PW,
db=REDIS_DB)
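# Usage sketch (assumes a reachable redis server; the sample payload
# below is invented for illustration):
if __name__ == '__main__':
    r = connect_redis_db()
    sample = {'channel': 42, 'data_type': 'temperature', 'value': 21.5}
    json2redis(sample, r)
    print(r.lrange('channel_42_temperature', 0, -1))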
| train-app/helper_functions.py | 1,519 | initialize redis connection for local and CF deployment running locally running on CF | 85 | en | 0.824793 |
#
# BSD 3-Clause License
#
# Copyright (c) 2019, Analog Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This class is used to generate delay register writes
import re
class regwrite_generator(object):
def __init__(self, seq_file):
self.code_dict = {}
self.data_dict = {}
self.seq_file = seq_file
def create_code_dict(self, text):
reg = re.compile(r'([0-9a-f]{4} [0-9a-f]{4})')
rawinfo = re.findall(reg, text)
for x in rawinfo:
s_line = re.split(r'\s', x)
addr = int(s_line[0],16)
            data = int(s_line[1], 16)  # the regex captures "addr data", so the value is field 1
self.code_dict[addr] = data
return self.code_dict
def create_seq_info(self):
data_name = ['PulseCount', 'LD1_Tap', 'LD2_Tap', 'LD3_Tap', 'LD4_Tap', 'LD5_Tap', 'Pos_Off', 'Vec_Off', 'Start_Loc', 'Tbl_Len']
reg = re.compile(r'([0-9a-zA-Z]+)')
myfile = open(self.seq_file, 'r')
for line in myfile:
rawInfo = re.findall(reg, line)
if len(rawInfo) == 1:
currLabel = rawInfo[0]
if len(rawInfo) == 4:
curr_mode = rawInfo[1]
curr_seq = rawInfo[3]
i = 0
if curr_mode in self.data_dict:
self.data_dict[curr_mode][curr_seq] = {}
else:
self.data_dict[curr_mode] = {}
self.data_dict[curr_mode][curr_seq] = {}
for i in range(10):
rawInfo = re.findall(reg, myfile.readline())
self.data_dict[curr_mode][curr_seq][data_name[i]] = [int(rawInfo[0], 16), int(rawInfo[1], 16)]
myfile.close()
return self.data_dict
# Given mode, sweep specified ld for all sequences
def delay_sequences(self, mode, delay, ld):
delay_writes = {}
for x in self.data_dict[str(mode)]:
writes = self.delay_sequence_ld(delay, ld, self.data_dict[str(mode)][x])
delay_writes = dict(delay_writes, **writes)
return delay_writes
def generate_delay_writes(self, mode, delay_min, delay_max, ld):
writes_dict = {}
for x in range(delay_min, delay_max):
writes_dict[x] = self.delay_sequences(mode, x, ld)
return writes_dict
def setbit(self, bit, vec):
bit = 1 << bit
vec = vec | bit
return vec
def unsetbit(self, bit, vec):
bit = 1 << bit
bit = ~bit
vec = vec & bit
return vec
def get_blanking_values(self, ld, seq_dict):
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
if pos_len != vec_len:
print('Table length not equal')
start_loc = seq_dict['Start_Loc'][1]
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
blk_pos = -1
blk_neg = -1
for i in range(vec_len):
curr_vec = self.code_dict[vec_ptr + i]
if ((curr_vec >> (ld - 1)) & 0x0001) == 1:
if blk_pos == -1:
blk_pos = i
elif blk_neg == -1:
blk_neg = i
start_pos = start_loc + 2
pos_tbl = []
for i in range(pos_len):
if i == 0:
pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
else:
pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
blk_pos = pos_tbl[blk_pos]
blk_neg = pos_tbl[blk_neg]
return blk_pos, blk_neg
# Delay Sequence LD
def delay_sequence_ld(self, delay, ld, seq_dict):
taps = seq_dict['LD' + str(ld) + '_Tap'][1]
taps_addr = seq_dict['LD' + str(ld) + '_Tap'][0]
tap_pos = taps & 0x00ff
tap_neg = (taps & 0xff00) >> 8
blk_pos, blk_neg = self.get_blanking_values(ld, seq_dict)
blk_pos_shift = 0
blk_neg_shift = 0
tap_pos = tap_pos + delay
tap_neg = tap_neg + delay
while tap_pos >= 128:
blk_pos_shift += 1
tap_pos -= 128
while tap_neg >= 128:
blk_neg_shift += 1
tap_neg -= 128
while tap_pos < 0:
blk_pos_shift -= 1
tap_pos += 128
while tap_neg < 0:
blk_neg_shift -= 1
tap_neg += 128
blk_pos = blk_pos + blk_pos_shift
blk_neg = blk_neg + blk_neg_shift
tap_write = {}
tap_write[hex(taps_addr)] = (tap_neg << 8) + tap_pos
blk_writes = self.set_blanking_values(blk_pos, blk_neg, ld, seq_dict)
writes = dict(tap_write, **blk_writes)
return writes
# Set blanking vals
def set_blanking_values(self, blk_pos, blk_neg, ld, seq_dict):
start_loc = seq_dict['Start_Loc'][1]
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
start_pos = start_loc + 2
pos_tbl = []
for i in range(pos_len):
if i == 0:
pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
else:
pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
blk_pos_loc = pos_tbl.index(blk_pos)
blk_neg_loc = pos_tbl.index(blk_neg)
blk_writes = {}
for i in range(vec_len):
if i == blk_pos_loc:
curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
elif i == blk_neg_loc:
curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
else:
curr_vec = self.unsetbit(ld-1, self.code_dict[vec_ptr + i])
blk_writes[hex(vec_ptr + i)] = curr_vec
return blk_writes
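# Usage sketch (file names and the mode/delay/ld values below are
# hypothetical; a real run needs the register dump text and the
# sequence file produced by the calibration flow):
if __name__ == '__main__':
    gen = regwrite_generator('sequence_info.txt')
    with open('register_dump.txt') as dump:
        gen.create_code_dict(dump.read())
    gen.create_seq_info()
    # sweep LD1 delays in [-5, 5) for mode 0; the result maps each
    # delay to a {register address: value} dict of writes
    writes = gen.generate_delay_writes(0, -5, 5, 1)
    print(writes[0])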
| tools/calibration-96tof1/tof_calib/regwrite_generator.py | 7,622 | BSD 3-Clause License Copyright (c) 2019, Analog Devices, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This class is used to generate delay register writes Given mode, sweep specified ld for all sequences Delay Sequence LD Set blanking vals | 1,659 | en | 0.878174 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: nico
@file: pipline.py
@time: 2018/05/05
"""
from django.contrib.auth import get_user_model
from bloguser.utils import get_image_from_url
from uuid import uuid4
User = get_user_model()
def save_bloguser_extra_profile(backend, user, response, *args, **kwargs):
"""
see more:
http://python-social-auth.readthedocs.io/en/latest/use_cases.html#retrieve-google-friends
http://python-social-auth.readthedocs.io/en/latest/pipeline.html
:param backend:
:param user:
:param response:
:param args:
:param kwargs:
:return:
"""
if backend.name == 'github':
        # fetch the URL of the user's GitHub avatar and save a local copy
image_url = response.get('avatar_url')
image_file = get_image_from_url(image_url)
if image_file is not None:
            # name the avatar file with a uuid
avatar_name = 'avatar' + uuid4().hex[:16]
if user.image == 'bloguser/avatar.png':
                # only replace the default avatar; a custom one is left untouched
user.image.save(avatar_name, image_file)
#user.image_url = image_url
user.save() | apps/bloguser/pipline.py | 1,245 | see more:
http://python-social-auth.readthedocs.io/en/latest/use_cases.html#retrieve-google-friends
http://python-social-auth.readthedocs.io/en/latest/pipeline.html
:param backend:
:param user:
:param response:
:param args:
:param kwargs:
:return:
@author: nico
@file: pipline.py
@time: 2018/05/05
!usr/bin/env python -*- coding:utf-8 -*- fetch the URL of the user's GitHub avatar and save a local copy name the avatar file with a uuid only replace the default avatar; a custom one is left untouched user.image_url = image_url | 455 | en | 0.36415
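The pipeline step above only runs once python-social-auth is pointed at it; a typical Django settings entry looks roughly like this (the dotted path is an assumption based on the file location):

    SOCIAL_AUTH_PIPELINE = (
        'social_core.pipeline.social_auth.social_details',
        # ... the other default steps ...
        'bloguser.pipline.save_bloguser_extra_profile',
    )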
def extractMichilunWordpressCom(item):
'''
Parser for 'michilun.wordpress.com'
'''
bad = [
'Recommendations and Reviews',
]
if any([tmp in item['tags'] for tmp in bad]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'),
('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'),
('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'),
('At the End of the Wish', 'At the End of the Wish', 'translated'),
('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'),
('Side Projects - The Flame\'s Daughter', 'The Flame\'s Daughter', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | WebMirror/management/rss_parser_funcs/feed_parse_extractMichilunWordpressCom.py | 1,539 | Parser for 'michilun.wordpress.com' | 35 | en | 0.297806 |
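A sketch of the expected input and outcomes (the item below is invented; `extractVolChapterFragmentPostfix` and `buildReleaseMessageWithType` are supplied by the WebMirror harness):

    item = {
        'title': 'Song in the Peach Blossoms - Chapter 12',
        'tags': ['Song in the Peach Blossoms'],
    }
    extractMichilunWordpressCom(item)
    # -> release message dict on a tag match, None for filtered or
    #    chapterless items, False when no tag mapping applies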
workers = 1  # number of worker processes handling requests; tune to site traffic
worker_class = "gevent"  # use gevent so requests are handled asynchronously for higher throughput
bind = "0.0.0.0:80"
 | gunicorn.conf.py | 230 | number of worker processes handling requests; tune to site traffic use the gevent library for async request handling and higher throughput bind = "0.0.0.0:80" | 72 | zh | 0.774971
from toee import *
import char_class_utils
import char_editor
###################################################
def GetConditionName(): # used by API
return "Sorcerer"
# def GetSpellCasterConditionName():
# return "Sorcerer Spellcasting"
def GetCategory():
return "Core 3.5 Ed Classes"
def GetClassDefinitionFlags():
return CDF_BaseClass | CDF_CoreClass
def GetClassHelpTopic():
return "TAG_SORCERERS"
classEnum = stat_level_sorcerer
###################################################
class_feats = {
1: (feat_simple_weapon_proficiency, feat_call_familiar)
}
class_skills = (skill_alchemy, skill_bluff, skill_concentration, skill_craft, skill_knowledge_arcana, skill_profession, skill_spellcraft)
spells_per_day = {
1: (5, 3),
2: (6, 4),
3: (6, 5),
4: (6, 6, 3),
5: (6, 6, 4),
6: (6, 6, 5, 3),
7: (6, 6, 6, 4),
8: (6, 6, 6, 5, 3),
9: (6, 6, 6, 6, 4),
10: (6, 6, 6, 6, 5, 3),
11: (6, 6, 6, 6, 6, 4),
12: (6, 6, 6, 6, 6, 5, 3),
13: (6, 6, 6, 6, 6, 6, 4),
14: (6, 6, 6, 6, 6, 6, 5, 3),
15: (6, 6, 6, 6, 6, 6, 6, 4),
16: (6, 6, 6, 6, 6, 6, 6, 5, 3),
17: (6, 6, 6, 6, 6, 6, 6, 6, 4),
18: (6, 6, 6, 6, 6, 6, 6, 6, 5, 3),
19: (6, 6, 6, 6, 6, 6, 6, 6, 6, 4),
20: (6, 6, 6, 6, 6, 6, 6, 6, 6, 6)
#lvl 0 1 2 3 4 5 6 7 8 9
}
spells_known = {
1: (4, 2),
2: (5, 2),
3: (5, 3),
4: (6, 3, 1),
5: (6, 4, 2),
6: (7, 4, 2, 1),
7: (7, 5, 3, 2),
8: (8, 5, 3, 2, 1),
9: (8, 5, 4, 3, 2),
10: (9, 5, 4, 3, 2, 1),
11: (9, 5, 5, 4, 3, 2),
12: (9, 5, 5, 4, 3, 2, 1),
13: (9, 5, 5, 4, 4, 3, 2),
14: (9, 5, 5, 4, 4, 3, 2, 1),
15: (9, 5, 5, 4, 4, 4, 3, 2),
16: (9, 5, 5, 4, 4, 4, 3, 2, 1),
17: (9, 5, 5, 4, 4, 4, 3, 3, 2),
18: (9, 5, 5, 4, 4, 4, 3, 3, 2, 1),
19: (9, 5, 5, 4, 4, 4, 3, 3, 3, 2),
20: (9, 5, 5, 4, 4, 4, 3, 3, 3, 3)
#lvl 0 1 2 3 4 5 6 7 8 9
}
def GetHitDieType():
return 4
def GetSkillPtsPerLevel():
return 2
def GetBabProgression():
return base_attack_bonus_type_non_martial
def IsFortSaveFavored():
return 0
def IsRefSaveFavored():
return 0
def IsWillSaveFavored():
return 1
# Spell casting
def GetSpellListType():
return spell_list_type_arcane
def GetSpellSourceType():
return spell_source_type_arcane
def GetSpellReadyingType():
return spell_readying_innate
def GetSpellsPerDay():
return spells_per_day
caster_levels = range(1, 21)
def GetCasterLevels():
return caster_levels
def GetSpellDeterminingStat():
return stat_charisma
def IsClassSkill(skillEnum):
return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
return class_feats
def IsAlignmentCompatible( alignment):
return 1
def ObjMeetsPrereqs( obj ):
abScore = obj.stat_base_get(stat_charisma)
if abScore > 10:
return 1
return 0
## Levelup callbacks
def IsSelectingSpellsOnLevelup( obj ):
return 1
def InitSpellSelection( obj, classLvlNew = -1, classLvlIncrement = 1):
classLvl = obj.stat_level_get(classEnum)
if classLvlNew <= 0:
classLvlNew = classLvl + 1
maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew ) # this regards spell list extension by stuff like Mystic Theurge
# Available Spells
spAvail = char_editor.get_learnable_spells(obj, classEnum, maxSpellLvl)
# add spell level labels
for p in range(0,maxSpellLvl+1):
spAvail.append(char_editor.KnownSpellInfo(spell_label_level_0 + p, 0, classEnum))
spAvail.sort()
char_editor.append_available_spells(spAvail)
# newly taken class
if classLvlNew == 1:
spEnums = []
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0, 0, classEnum)) # add "Level 0" label
for p in range(0,4): # 4 cantrips
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0, 3, classEnum))
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_1, 0, classEnum)) # add "Level 1" label
for p in range(0,2): # 2 level 1 spells
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_1, 3, classEnum))
char_editor.append_spell_enums(spEnums)
return 0
# Incrementing class level
spellListLvl = obj.stat_level_get(stat_spell_list_level, classEnum) + classLvlIncrement # the effective level for getting the number of spells known
spEnums = char_editor.get_known_class_spells(obj, classEnum) # get all spells known for this class
for spellLvl in range(0, maxSpellLvl+1):
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0 + spellLvl, 0, classEnum)) # add label
# add spells
newSpellsKnownCount = char_class_utils.GetSpellsKnownAddedCount( spells_known , spellListLvl, spellLvl)
print "new num spells for spell level " + str(spellLvl) + ": " + str(newSpellsKnownCount)
for q in range(0, newSpellsKnownCount):
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0 + spellLvl, 3, classEnum))
isReplacing = 0
if spellListLvl >= 4 and (spellListLvl % 2) == 0: # spell replacement
isReplacing = 1
if char_editor.get_class_code() != classEnum: #grant this benefit only for strict levelup (also to prevent some headache...)
isReplacing = 0
if isReplacing == 0:
spEnums.sort()
char_editor.append_spell_enums(spEnums)
return 0
# mark as replaceable
for p in range(0,len(spEnums)):
spEnum = spEnums[p].spell_enum
if spell_vacant <= spEnum <= spell_label_level_9:
continue
if spell_new_slot_lvl_0 <= spEnum <= spell_new_slot_lvl_9:
continue
if char_editor.get_spell_level(spEnum, classEnum) <= maxSpellLvl-2:
spEnums[p].spell_status = 1 # marked as replaceable
spEnums.sort()
char_editor.append_spell_enums(spEnums)
return 0
def LevelupCheckSpells( obj ):
classLvl = obj.stat_level_get(classEnum)
classLvlNew = classLvl + 1
maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew )
spell_enums = char_editor.get_spell_enums()
for spInfo in spell_enums:
if spInfo.spell_enum == spell_vacant:
if maxSpellLvl >= 4 and spInfo.spell_level == 0: # in case the cantrips are causing problems
continue
return 0
return 1
def LevelupSpellsFinalize( obj, classLvlNew = -1 ):
spEnums = char_editor.get_spell_enums()
char_editor.spell_known_add(spEnums) # internally takes care of duplicates and the labels/vacant slots
return | tpdatasrc/tpgamefiles/rules/char_class/class016_sorcerer.py | 6,202 | used by API def GetSpellCasterConditionName(): return "Sorcerer Spellcasting"lvl 0 1 2 3 4 5 6 7 8 9lvl 0 1 2 3 4 5 6 7 8 9 Spell casting Levelup callbacks this regards spell list extension by stuff like Mystic Theurge Available Spells add spell level labels newly taken class add "Level 0" label 4 cantrips add "Level 1" label 2 level 1 spells Incrementing class level the effective level for getting the number of spells known get all spells known for this class add label add spells spell replacementgrant this benefit only for strict levelup (also to prevent some headache...) mark as replaceable marked as replaceable in case the cantrips are causing problems internally takes care of duplicates and the labels/vacant slots | 746 | en | 0.737428 |
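Reading the progression tables above (an illustrative lookup, not engine code):

    >>> spells_per_day[6]
    (6, 6, 5, 3)
    >>> spells_known[6]
    (7, 4, 2, 1)

i.e. a level 6 sorcerer casts six cantrips plus 6/5/3 spells per day of levels 1-3, while knowing 7/4/2/1 distinct spells at levels 0-3.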
# Third Party
import mxnet as mx
from mxnet.ndarray import NDArray
# First Party
from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys
from smdebug.core.hook import CallbackHook
from smdebug.core.json_config import DEFAULT_WORKER_NAME
from smdebug.core.utils import FRAMEWORK, error_handling_agent
from smdebug.mxnet.collection import CollectionManager
from smdebug.mxnet.graph import _net2pb
from smdebug.mxnet.singleton_utils import set_hook
from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array
from smdebug.profiler.profiler_config_parser import get_profiler_config_parser
DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES]
COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.GRADIENTS,
CollectionKeys.LOSSES,
]
profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH)
class Hook(CallbackHook):
def __init__(
self,
out_dir=None,
export_tensorboard=False,
tensorboard_dir=None,
dry_run=False,
reduction_config=None,
save_config=None,
include_regex=None,
include_collections=None,
save_all=False,
include_workers="one",
):
collection_manager = CollectionManager()
super().__init__(
collection_manager=collection_manager,
default_include_collections=DEFAULT_INCLUDE_COLLECTIONS,
profiler_config_parser=profiler_config_parser,
data_type_name=mx.ndarray.NDArray.__name__,
out_dir=out_dir,
export_tensorboard=export_tensorboard,
tensorboard_dir=tensorboard_dir,
dry_run=dry_run,
reduction_config=reduction_config,
save_config=save_config,
include_regex=include_regex,
include_collections=include_collections,
save_all=save_all,
include_workers=include_workers,
)
self.last_block = None
self.model = None
self.exported_model = False
# Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well.
self.registered_blocks = set()
self.worker = self._get_worker_name()
set_hook(self)
def _get_worker_name(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return f"worker_{hvd.rank()}"
except (ModuleNotFoundError, ValueError, ImportError):
pass
return DEFAULT_WORKER_NAME
def _get_num_workers(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return hvd.size()
except (ModuleNotFoundError, ValueError, ImportError):
pass
return 1
def _cleanup(self):
# Write the gradients of the past step if the writer is still available.
if self.writer is not None and self.last_block is not None:
self._log_params(self.last_block)
if self.exported_model is False:
self._export_model()
super()._cleanup()
def _log_params(self, block):
params = block.collect_params().values()
for param in params:
self._log_param(param)
def _log_param(self, param):
try:
self._save_for_tensor(
tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0])
)
# If Gradient for this param is available
if param.grad_req != "null":
self._save_for_tensor(
tensor_name=self.GRADIENT_PREFIX + param.name,
tensor_value=param.grad(param.list_ctx()[0]),
)
except RuntimeError as e:
self.logger.warning(
f"Could not log parameter {param.name} due to the mxnet exception: {e}"
)
def _export_model(self):
if self.model is not None:
try:
tb_writer = self._maybe_get_tb_writer()
if tb_writer:
tb_writer.write_graph(_net2pb(self.model))
except (RuntimeError, TypeError) as e:
self.logger.warning(
f"Could not export model graph for tensorboard "
f"due to the mxnet exception: {e}"
)
def _get_default_collections(self):
return DEFAULT_MXNET_COLLECTIONS
# This hook is invoked by trainer prior to running the forward pass.
@error_handling_agent.catch_smdebug_errors()
def forward_pre_hook(self, block, inputs):
if self.writer is not None:
# Write the params and gradients of the
# past step if the writer is still available.
self._log_params(block)
self._close_writers()
self._close_tb_writer()
if not self.prepared_collections:
# at this point we need all collections to be ready
# this may not be the case at creation of hook
# as user's code after hook might add collections
self._prepare_collections()
self.prepared_collections = True
self._increment_step()
if self._get_collections_to_save_for_step():
self._initialize_writers()
if self.exported_model is False:
self._export_model()
self.exported_model = True
if self.last_saved_step is not None and not self.exported_collections:
self.export_collections()
self.exported_collections = True
self.last_block = block
self._save_custom_tensors_post_step()
# This hook is invoked by trainer after running the forward pass.
@error_handling_agent.catch_smdebug_errors()
def forward_hook(self, block, inputs, outputs):
if not self._get_collections_to_save_for_step():
return
block_name = block.name
# This overwhelms the logs; turn back on if you really need it
# logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name))
# Output input tensor
self._write_inputs(block_name, inputs)
# Output output tensors
self._write_outputs(block_name, outputs)
self.last_saved_step = self.step
def _recursive_apply(self, block):
"""
This function is "applied" to every child in the block. This function in turn
registers the forward hook to each module. It helps logging the input output tensors
of that module.
"""
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
def _is_recursive_needed(self):
collections_to_save = self.include_collections
# Check if default collection has a regex associated with it.
# If it does we would need to apply hook recursively.
if (
len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0
and CollectionKeys.DEFAULT in collections_to_save
):
return True
# Get the collections that are to be saved but are not part of default collections
# We will need to apply hook recursively to get tensors specified in those collections.
extra_coll = [
value
for value in collections_to_save
if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK
]
# extra_coll contains the collections that are not part of default collections.
return len(extra_coll) != 0
def register_hook(self, block):
# for compatibility with ZCC patches which call this
self.register_block(block)
@error_handling_agent.catch_smdebug_errors()
def register_block(self, block):
"""
        This function registers the forward hook. If the user wants the hook
        registered for every child of the given block, it uses the "apply" API
        to do so.
        The hook is registered recursively if the user has specified collections
        beyond the default ones, viz. gradients, weights and biases.
"""
if not isinstance(block, mx.gluon.Block):
self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.")
return
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
# Skip the forward pre hook for the Loss blocks.
if isinstance(block, mx.gluon.loss.Loss):
self.logger.info(f"Registering hook for block {block.name}")
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
return
else:
self.model = block
is_recursive = self._is_recursive_needed()
block.register_forward_pre_hook(self.forward_pre_hook)
if is_recursive is True:
block.apply(self._recursive_apply)
else:
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
@staticmethod
def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs):
return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs)
@staticmethod
def _make_numpy_array(tensor_value):
if isinstance(tensor_value, NDArray):
return tensor_value.asnumpy()
return make_numpy_array(tensor_value)
| smdebug/mxnet/hook.py | 9,853 | This function is "applied" to every child in the block. This function in turn
registers the forward hook to each module. It helps logging the input output tensors
of that module.
This function registers the forward hook. If user wants to register the hook
for every child in the given block, then the function calls "apply" API for
registration of the hook.
The hook is registered recursively, if user has specified the collections that are more than
the default collectors viz. gradients, weight and bias
Third Party First Party Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well. Write the gradients of the past step if the writer is still available. If Gradient for this param is available This hook is invoked by trainer prior to running the forward pass. Write the params and gradients of the past step if the writer is still available. at this point we need all collections to be ready this may not be the case at creation of hook as user's code after hook might add collections This hook is invoked by trainer after running the forward pass. This overwhelms the logs; turn back on if you really need it logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name)) Output input tensor Output output tensors Check if the hook is already registered for this block. Check if default collection has a regex associated with it. If it does we would need to apply hook recursively. Get the collections that are to be saved but are not part of default collections We will need to apply hook recursively to get tensors specified in those collections. extra_coll contains the collections that are not part of default collections. for compatibility with ZCC patches which call this Check if the hook is already registered for this block. Skip the forward pre hook for the Loss blocks. | 1,856 | en | 0.895963 |
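A typical wiring sketch (hedged; `out_dir` is a placeholder and the import path follows the package layout used above):

    import mxnet as mx
    from smdebug.mxnet import Hook

    net = mx.gluon.nn.Dense(10)
    net.initialize()
    hook = Hook(out_dir='/tmp/smdebug_run', save_all=True)
    hook.register_block(net)  # registers forward (pre-)hooks, recursing when needed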
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_base import ApplicationBase
class ApplicationUpdateParameters(ApplicationBase):
"""Request parameters for updating a new application.
:param allow_guests_sign_in: A property on the application to indicate if
the application accepts other IDPs or not or partially accepts.
:type allow_guests_sign_in: bool
:param allow_passthrough_users: Indicates that the application supports
pass through users who have no presence in the resource tenant.
:type allow_passthrough_users: bool
:param app_logo_url: The url for the application logo image stored in a
CDN.
:type app_logo_url: str
:param app_roles: The collection of application roles that an application
may declare. These roles can be assigned to users, groups or service
principals.
:type app_roles: list[~azure.graphrbac.models.AppRole]
:param app_permissions: The application permissions.
:type app_permissions: list[str]
:param available_to_other_tenants: Whether the application is available to
other tenants.
:type available_to_other_tenants: bool
:param error_url: A URL provided by the author of the application to
report errors when using the application.
:type error_url: str
:param group_membership_claims: Configures the groups claim issued in a
user or OAuth 2.0 access token that the app expects. Possible values
include: 'None', 'SecurityGroup', 'All'
:type group_membership_claims: str or
~azure.graphrbac.models.GroupMembershipClaimTypes
:param homepage: The home page of the application.
:type homepage: str
:param informational_urls: URLs with more information about the
application.
:type informational_urls: ~azure.graphrbac.models.InformationalUrl
:param is_device_only_auth_supported: Specifies whether this application
supports device authentication without a user. The default is false.
:type is_device_only_auth_supported: bool
:param key_credentials: A collection of KeyCredential objects.
:type key_credentials: list[~azure.graphrbac.models.KeyCredential]
:param known_client_applications: Client applications that are tied to
this resource application. Consent to any of the known client applications
will result in implicit consent to the resource application through a
combined consent dialog (showing the OAuth permission scopes required by
the client and the resource).
:type known_client_applications: list[str]
:param logout_url: the url of the logout page
:type logout_url: str
:param oauth2_allow_implicit_flow: Whether to allow implicit grant flow
for OAuth2
:type oauth2_allow_implicit_flow: bool
:param oauth2_allow_url_path_matching: Specifies whether during a token
Request Azure AD will allow path matching of the redirect URI against the
applications collection of replyURLs. The default is false.
:type oauth2_allow_url_path_matching: bool
:param oauth2_permissions: The collection of OAuth 2.0 permission scopes
that the web API (resource) application exposes to client applications.
These permission scopes may be granted to client applications during
consent.
:type oauth2_permissions: list[~azure.graphrbac.models.OAuth2Permission]
:param oauth2_require_post_response: Specifies whether, as part of OAuth
2.0 token requests, Azure AD will allow POST requests, as opposed to GET
requests. The default is false, which specifies that only GET requests
will be allowed.
:type oauth2_require_post_response: bool
:param org_restrictions: A list of tenants allowed to access application.
:type org_restrictions: list[str]
:param optional_claims:
:type optional_claims: ~azure.graphrbac.models.OptionalClaims
:param password_credentials: A collection of PasswordCredential objects
:type password_credentials:
list[~azure.graphrbac.models.PasswordCredential]
:param pre_authorized_applications: list of pre-authorized applications.
:type pre_authorized_applications:
list[~azure.graphrbac.models.PreAuthorizedApplication]
:param public_client: Specifies whether this application is a public
client (such as an installed application running on a mobile device).
Default is false.
:type public_client: bool
:param publisher_domain: Reliable domain which can be used to identify an
application.
:type publisher_domain: str
:param reply_urls: A collection of reply URLs for the application.
:type reply_urls: list[str]
:param required_resource_access: Specifies resources that this application
requires access to and the set of OAuth permission scopes and application
roles that it needs under each of those resources. This pre-configuration
of required resource access drives the consent experience.
:type required_resource_access:
list[~azure.graphrbac.models.RequiredResourceAccess]
:param saml_metadata_url: The URL to the SAML metadata for the
application.
:type saml_metadata_url: str
:param sign_in_audience: Audience for signing in to the application
(AzureADMyOrganization, AzureADAllOrganizations,
AzureADAndMicrosoftAccounts).
:type sign_in_audience: str
:param www_homepage: The primary Web page.
:type www_homepage: str
:param display_name: The display name of the application.
:type display_name: str
:param identifier_uris: A collection of URIs for the application.
:type identifier_uris: list[str]
"""
_attribute_map = {
'allow_guests_sign_in': {'key': 'allowGuestsSignIn', 'type': 'bool'},
'allow_passthrough_users': {'key': 'allowPassthroughUsers', 'type': 'bool'},
'app_logo_url': {'key': 'appLogoUrl', 'type': 'str'},
'app_roles': {'key': 'appRoles', 'type': '[AppRole]'},
'app_permissions': {'key': 'appPermissions', 'type': '[str]'},
'available_to_other_tenants': {'key': 'availableToOtherTenants', 'type': 'bool'},
'error_url': {'key': 'errorUrl', 'type': 'str'},
'group_membership_claims': {'key': 'groupMembershipClaims', 'type': 'str'},
'homepage': {'key': 'homepage', 'type': 'str'},
'informational_urls': {'key': 'informationalUrls', 'type': 'InformationalUrl'},
'is_device_only_auth_supported': {'key': 'isDeviceOnlyAuthSupported', 'type': 'bool'},
'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},
'known_client_applications': {'key': 'knownClientApplications', 'type': '[str]'},
'logout_url': {'key': 'logoutUrl', 'type': 'str'},
'oauth2_allow_implicit_flow': {'key': 'oauth2AllowImplicitFlow', 'type': 'bool'},
'oauth2_allow_url_path_matching': {'key': 'oauth2AllowUrlPathMatching', 'type': 'bool'},
'oauth2_permissions': {'key': 'oauth2Permissions', 'type': '[OAuth2Permission]'},
'oauth2_require_post_response': {'key': 'oauth2RequirePostResponse', 'type': 'bool'},
'org_restrictions': {'key': 'orgRestrictions', 'type': '[str]'},
'optional_claims': {'key': 'optionalClaims', 'type': 'OptionalClaims'},
'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},
'pre_authorized_applications': {'key': 'preAuthorizedApplications', 'type': '[PreAuthorizedApplication]'},
'public_client': {'key': 'publicClient', 'type': 'bool'},
'publisher_domain': {'key': 'publisherDomain', 'type': 'str'},
'reply_urls': {'key': 'replyUrls', 'type': '[str]'},
'required_resource_access': {'key': 'requiredResourceAccess', 'type': '[RequiredResourceAccess]'},
'saml_metadata_url': {'key': 'samlMetadataUrl', 'type': 'str'},
'sign_in_audience': {'key': 'signInAudience', 'type': 'str'},
'www_homepage': {'key': 'wwwHomepage', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'identifier_uris': {'key': 'identifierUris', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ApplicationUpdateParameters, self).__init__(**kwargs)
self.display_name = kwargs.get('display_name', None)
self.identifier_uris = kwargs.get('identifier_uris', None)
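# Usage sketch (hedged; such models are normally built by callers of the
# generated client, e.g. azure-graphrbac's applications.patch):
#
#     params = ApplicationUpdateParameters(
#         display_name='my-app',
#         identifier_uris=['https://contoso.example/my-app'],
#     )
#     graph_client.applications.patch(app_object_id, params)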
 | sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/application_update_parameters.py | 8,738 | Request parameters for updating an existing application.
:param allow_guests_sign_in: A property on the application to indicate if
the application accepts other IDPs or not or partially accepts.
:type allow_guests_sign_in: bool
:param allow_passthrough_users: Indicates that the application supports
pass through users who have no presence in the resource tenant.
:type allow_passthrough_users: bool
:param app_logo_url: The url for the application logo image stored in a
CDN.
:type app_logo_url: str
:param app_roles: The collection of application roles that an application
may declare. These roles can be assigned to users, groups or service
principals.
:type app_roles: list[~azure.graphrbac.models.AppRole]
:param app_permissions: The application permissions.
:type app_permissions: list[str]
:param available_to_other_tenants: Whether the application is available to
other tenants.
:type available_to_other_tenants: bool
:param error_url: A URL provided by the author of the application to
report errors when using the application.
:type error_url: str
:param group_membership_claims: Configures the groups claim issued in a
user or OAuth 2.0 access token that the app expects. Possible values
include: 'None', 'SecurityGroup', 'All'
:type group_membership_claims: str or
~azure.graphrbac.models.GroupMembershipClaimTypes
:param homepage: The home page of the application.
:type homepage: str
:param informational_urls: URLs with more information about the
application.
:type informational_urls: ~azure.graphrbac.models.InformationalUrl
:param is_device_only_auth_supported: Specifies whether this application
supports device authentication without a user. The default is false.
:type is_device_only_auth_supported: bool
:param key_credentials: A collection of KeyCredential objects.
:type key_credentials: list[~azure.graphrbac.models.KeyCredential]
:param known_client_applications: Client applications that are tied to
this resource application. Consent to any of the known client applications
will result in implicit consent to the resource application through a
combined consent dialog (showing the OAuth permission scopes required by
the client and the resource).
:type known_client_applications: list[str]
:param logout_url: the url of the logout page
:type logout_url: str
:param oauth2_allow_implicit_flow: Whether to allow implicit grant flow
for OAuth2
:type oauth2_allow_implicit_flow: bool
:param oauth2_allow_url_path_matching: Specifies whether during a token
Request Azure AD will allow path matching of the redirect URI against the
applications collection of replyURLs. The default is false.
:type oauth2_allow_url_path_matching: bool
:param oauth2_permissions: The collection of OAuth 2.0 permission scopes
that the web API (resource) application exposes to client applications.
These permission scopes may be granted to client applications during
consent.
:type oauth2_permissions: list[~azure.graphrbac.models.OAuth2Permission]
:param oauth2_require_post_response: Specifies whether, as part of OAuth
2.0 token requests, Azure AD will allow POST requests, as opposed to GET
requests. The default is false, which specifies that only GET requests
will be allowed.
:type oauth2_require_post_response: bool
:param org_restrictions: A list of tenants allowed to access application.
:type org_restrictions: list[str]
:param optional_claims:
:type optional_claims: ~azure.graphrbac.models.OptionalClaims
:param password_credentials: A collection of PasswordCredential objects
:type password_credentials:
list[~azure.graphrbac.models.PasswordCredential]
:param pre_authorized_applications: list of pre-authorized applications.
:type pre_authorized_applications:
list[~azure.graphrbac.models.PreAuthorizedApplication]
:param public_client: Specifies whether this application is a public
client (such as an installed application running on a mobile device).
Default is false.
:type public_client: bool
:param publisher_domain: Reliable domain which can be used to identify an
application.
:type publisher_domain: str
:param reply_urls: A collection of reply URLs for the application.
:type reply_urls: list[str]
:param required_resource_access: Specifies resources that this application
requires access to and the set of OAuth permission scopes and application
roles that it needs under each of those resources. This pre-configuration
of required resource access drives the consent experience.
:type required_resource_access:
list[~azure.graphrbac.models.RequiredResourceAccess]
:param saml_metadata_url: The URL to the SAML metadata for the
application.
:type saml_metadata_url: str
:param sign_in_audience: Audience for signing in to the application
(AzureADMyOrganization, AzureADAllOrganizations,
AzureADAndMicrosoftAccounts).
:type sign_in_audience: str
:param www_homepage: The primary Web page.
:type www_homepage: str
:param display_name: The display name of the application.
:type display_name: str
:param identifier_uris: A collection of URIs for the application.
:type identifier_uris: list[str]
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 5,508 | en | 0.695226 |
## Creates 404 page
import pystache
import utils
def main(data):
html = pystache.render(data["templates"]["page"], {
"title": "Page not found",
"description": "Error 404: page not found",
## Since we don't know the depth of this page relative to the root,
## we have to assume the db directory is located in the root of this web resource
"navigation": utils.generateTopBarNavigation("/" + data["config"].get("Site", "DbPath")),
"name": "error",
"content": pystache.render(data["templates"]["not-found-page-contents"]),
## Since we don't know the depth of this page relative to the root,
## we have to assume the search page is located in the root of this web resource
"search": "/" + data["definitions"]["filenames"]["search"],
})
notFoundFile = utils.mkfile(
data["definitions"]["runtime"]["cwd"],
data["config"].get("Filesystem", "DestinationDirPath"),
data["definitions"]["filenames"]["notfound"],
)
notFoundFile.write(html)
notFoundFile.close()
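# Invocation sketch (hedged): the surrounding generator framework is
# assumed to call main() with a `data` dict carrying the keys read above,
# roughly:
#
#     main({
#         'templates': {'page': ..., 'not-found-page-contents': ...},
#         'config': config,  # ConfigParser with Site/Filesystem sections
#         'definitions': {'filenames': {...}, 'runtime': {'cwd': ...}},
#     })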
| website-generator.d/80-not-found-page.py | 1,103 | Creates 404 page Since we don't know the depth of this page relative to the root, we have to assume the db directory is located in the root of this web resource Since we don't know the depth of this page relative to the root, we have to assume the search page is located in the root of this web resource | 303 | en | 0.977143 |
# Copyright (c) 2010-2020 openpyxlzip
# package imports
from openpyxlzip.reader.excel import load_workbook
from openpyxlzip.xml.functions import tostring, fromstring
from openpyxlzip.styles import Border, Side, PatternFill, Color, Font, fills, borders, colors
from openpyxlzip.styles.differential import DifferentialStyle, DifferentialStyleList
from openpyxlzip.formatting.formatting import ConditionalFormattingList
from openpyxlzip.formatting.rule import CellIsRule, FormulaRule, Rule
# test imports
import pytest
from openpyxlzip.tests.helper import compare_xml
class DummyWorkbook():
def __init__(self):
self._differential_styles = DifferentialStyleList()
self.worksheets = []
class DummyWorksheet():
def __init__(self):
self.conditional_formatting = ConditionalFormattingList()
self.parent = DummyWorkbook()
def test_conditional_formatting_read(datadir):
datadir.chdir()
reference_file = 'conditional-formatting.xlsx'
wb = load_workbook(reference_file)
ws = wb.active
rules = ws.conditional_formatting
assert len(rules) == 30
# First test the conditional formatting rules read
rule = rules['A1:A1048576'][0]
assert dict(rule) == {'priority':'30', 'type': 'colorScale', }
rule = rules['B1:B10'][0]
assert dict(rule) == {'priority': '29', 'type': 'colorScale'}
rule = rules['C1:C10'][0]
assert dict(rule) == {'priority': '28', 'type': 'colorScale'}
rule = rules['D1:D10'][0]
assert dict(rule) == {'priority': '27', 'type': 'colorScale', }
rule = rules['E1:E10'][0]
assert dict(rule) == {'priority': '26', 'type': 'colorScale', }
rule = rules['F1:F10'][0]
assert dict(rule) == {'priority': '25', 'type': 'colorScale', }
rule = rules['G1:G10'][0]
assert dict(rule) == {'priority': '24', 'type': 'colorScale', }
rule = rules['H1:H10'][0]
assert dict(rule) == {'priority': '23', 'type': 'colorScale', }
rule = rules['I1:I10'][0]
assert dict(rule) == {'priority': '22', 'type': 'colorScale', }
rule = rules['J1:J10'][0]
assert dict(rule) == {'priority': '21', 'type': 'colorScale', }
rule = rules['K1:K10'][0]
assert dict(rule) == {'priority': '20', 'type': 'dataBar'}
rule = rules['L1:L10'][0]
assert dict(rule) == {'priority': '19', 'type': 'dataBar'}
rule = rules['M1:M10'][0]
assert dict(rule) == {'priority': '18', 'type': 'dataBar'}
rule = rules['N1:N10'][0]
assert dict(rule) == {'priority': '17', 'type': 'iconSet'}
rule = rules['O1:O10'][0]
assert dict(rule) == {'priority': '16', 'type': 'iconSet'}
rule = rules['P1:P10'][0]
assert dict(rule) == {'priority': '15', 'type': 'iconSet'}
rule = rules['Q1:Q10'][0]
assert dict(rule) == {'text': '3', 'priority': '14', 'dxfId': '27',
'operator': 'containsText', 'type': 'containsText'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE')
)
rule = rules['R1:R10'][0]
assert dict(rule) == {'operator': 'between', 'dxfId': '26', 'type':
'cellIs', 'priority': '13'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C6500'),
fill=PatternFill(bgColor='FFFFEB9C'))
rule = rules['S1:S10'][0]
assert dict(rule) == {'priority': '12', 'dxfId': '25', 'percent': '1',
'type': 'top10', 'rank': '10'}
rule = rules['T1:T10'][0]
assert dict(rule) == {'priority': '11', 'dxfId': '24', 'type': 'top10',
'rank': '4', 'bottom': '1'}
rule = rules['U1:U10'][0]
assert dict(rule) == {'priority': '10', 'dxfId': '23', 'type':
'aboveAverage'}
rule = rules['V1:V10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '22', 'type':
'aboveAverage', 'priority': '9'}
rule = rules['W1:W10'][0]
assert dict(rule) == {'priority': '8', 'dxfId': '21', 'type':
'aboveAverage', 'equalAverage': '1'}
rule = rules['X1:X10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '20', 'priority': '7',
'type': 'aboveAverage', 'equalAverage': '1'}
rule = rules['Y1:Y10'][0]
assert dict(rule) == {'priority': '6', 'dxfId': '19', 'type':
'aboveAverage', 'stdDev': '1'}
rule = rules['Z1:Z10'][0]
assert dict(rule)== {'aboveAverage': '0', 'dxfId': '18', 'type':
'aboveAverage', 'stdDev': '1', 'priority': '5'}
assert rule.dxf == DifferentialStyle(font=Font(b=True, i=True, color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE'),
border=Border(
left=Side(style='thin', color=Color(theme=5)),
right=Side(style='thin', color=Color(theme=5)),
top=Side(style='thin', color=Color(theme=5)),
bottom=Side(style='thin', color=Color(theme=5))
)
)
rule = rules['AA1:AA10'][0]
assert dict(rule) == {'priority': '4', 'dxfId': '17', 'type':
'aboveAverage', 'stdDev': '2'}
rule = rules['AB1:AB10'][0]
assert dict(rule) == {'priority': '3', 'dxfId': '16', 'type':
'duplicateValues'}
rule = rules['AC1:AC10'][0]
assert dict(rule) == {'priority': '2', 'dxfId': '15', 'type':
'uniqueValues'}
rule = rules['AD1:AD10'][0]
assert dict(rule) == {'priority': '1', 'dxfId': '14', 'type': 'expression',}
@pytest.fixture
def ConditionalFormatting():
from ..formatting import ConditionalFormatting
return ConditionalFormatting
class TestConditionalFormatting:
def test_ctor(self, ConditionalFormatting):
cf = ConditionalFormatting(sqref="A1:B5")
xml = tostring(cf.to_tree())
expected = """
<conditionalFormatting sqref="A1:B5" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_tree(self, ConditionalFormatting):
src = """
<conditionalFormatting sqref="A1:B5" />
"""
tree = fromstring(src)
cf = ConditionalFormatting.from_tree(tree)
assert cf.sqref == "A1:B5"
def test_eq(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
c2 = ConditionalFormatting("A1:B5", pivot=True)
assert c1 == c2
def test_hash(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert hash(c1) == hash("A1:B5")
def test_repr(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert repr(c1) == "<ConditionalFormatting A1:B5>"
def test_contains(self, ConditionalFormatting):
c2 = ConditionalFormatting("A1:A5 B1:B5")
assert "B2" in c2
| openpyxlzip/formatting/tests/test_formatting.py | 7,150 | Copyright (c) 2010-2020 openpyxlzip package imports test imports First test the conditional formatting rules read | 113 | en | 0.608945 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVirtualMachineScaleSetResult',
'AwaitableGetVirtualMachineScaleSetResult',
'get_virtual_machine_scale_set',
]
@pulumi.output_type
class GetVirtualMachineScaleSetResult:
"""
Describes a Virtual Machine Scale Set.
"""
def __init__(__self__, additional_capabilities=None, automatic_repairs_policy=None, do_not_run_extensions_on_overprovisioned_vms=None, extended_location=None, host_group=None, id=None, identity=None, location=None, name=None, orchestration_mode=None, overprovision=None, plan=None, platform_fault_domain_count=None, provisioning_state=None, proximity_placement_group=None, scale_in_policy=None, single_placement_group=None, sku=None, tags=None, type=None, unique_id=None, upgrade_policy=None, virtual_machine_profile=None, zone_balance=None, zones=None):
if additional_capabilities and not isinstance(additional_capabilities, dict):
raise TypeError("Expected argument 'additional_capabilities' to be a dict")
pulumi.set(__self__, "additional_capabilities", additional_capabilities)
if automatic_repairs_policy and not isinstance(automatic_repairs_policy, dict):
raise TypeError("Expected argument 'automatic_repairs_policy' to be a dict")
pulumi.set(__self__, "automatic_repairs_policy", automatic_repairs_policy)
if do_not_run_extensions_on_overprovisioned_vms and not isinstance(do_not_run_extensions_on_overprovisioned_vms, bool):
raise TypeError("Expected argument 'do_not_run_extensions_on_overprovisioned_vms' to be a bool")
pulumi.set(__self__, "do_not_run_extensions_on_overprovisioned_vms", do_not_run_extensions_on_overprovisioned_vms)
if extended_location and not isinstance(extended_location, dict):
raise TypeError("Expected argument 'extended_location' to be a dict")
pulumi.set(__self__, "extended_location", extended_location)
if host_group and not isinstance(host_group, dict):
raise TypeError("Expected argument 'host_group' to be a dict")
pulumi.set(__self__, "host_group", host_group)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if orchestration_mode and not isinstance(orchestration_mode, str):
raise TypeError("Expected argument 'orchestration_mode' to be a str")
pulumi.set(__self__, "orchestration_mode", orchestration_mode)
if overprovision and not isinstance(overprovision, bool):
raise TypeError("Expected argument 'overprovision' to be a bool")
pulumi.set(__self__, "overprovision", overprovision)
if plan and not isinstance(plan, dict):
raise TypeError("Expected argument 'plan' to be a dict")
pulumi.set(__self__, "plan", plan)
if platform_fault_domain_count and not isinstance(platform_fault_domain_count, int):
raise TypeError("Expected argument 'platform_fault_domain_count' to be a int")
pulumi.set(__self__, "platform_fault_domain_count", platform_fault_domain_count)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if proximity_placement_group and not isinstance(proximity_placement_group, dict):
raise TypeError("Expected argument 'proximity_placement_group' to be a dict")
pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
if scale_in_policy and not isinstance(scale_in_policy, dict):
raise TypeError("Expected argument 'scale_in_policy' to be a dict")
pulumi.set(__self__, "scale_in_policy", scale_in_policy)
if single_placement_group and not isinstance(single_placement_group, bool):
raise TypeError("Expected argument 'single_placement_group' to be a bool")
pulumi.set(__self__, "single_placement_group", single_placement_group)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
pulumi.set(__self__, "unique_id", unique_id)
if upgrade_policy and not isinstance(upgrade_policy, dict):
raise TypeError("Expected argument 'upgrade_policy' to be a dict")
pulumi.set(__self__, "upgrade_policy", upgrade_policy)
if virtual_machine_profile and not isinstance(virtual_machine_profile, dict):
raise TypeError("Expected argument 'virtual_machine_profile' to be a dict")
pulumi.set(__self__, "virtual_machine_profile", virtual_machine_profile)
if zone_balance and not isinstance(zone_balance, bool):
raise TypeError("Expected argument 'zone_balance' to be a bool")
pulumi.set(__self__, "zone_balance", zone_balance)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="additionalCapabilities")
def additional_capabilities(self) -> Optional['outputs.AdditionalCapabilitiesResponse']:
"""
Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
"""
return pulumi.get(self, "additional_capabilities")
@property
@pulumi.getter(name="automaticRepairsPolicy")
def automatic_repairs_policy(self) -> Optional['outputs.AutomaticRepairsPolicyResponse']:
"""
Policy for automatic repairs.
"""
return pulumi.get(self, "automatic_repairs_policy")
@property
@pulumi.getter(name="doNotRunExtensionsOnOverprovisionedVMs")
def do_not_run_extensions_on_overprovisioned_vms(self) -> Optional[bool]:
"""
When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
"""
return pulumi.get(self, "do_not_run_extensions_on_overprovisioned_vms")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:
"""
The extended location of the Virtual Machine Scale Set.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="hostGroup")
def host_group(self) -> Optional['outputs.SubResourceResponse']:
"""
Specifies information about the dedicated host group that the virtual machine scale set resides in. <br><br>Minimum api-version: 2020-06-01.
"""
return pulumi.get(self, "host_group")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.VirtualMachineScaleSetIdentityResponse']:
"""
The identity of the virtual machine scale set, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="orchestrationMode")
def orchestration_mode(self) -> Optional[str]:
"""
Specifies the orchestration mode for the virtual machine scale set.
"""
return pulumi.get(self, "orchestration_mode")
@property
@pulumi.getter
def overprovision(self) -> Optional[bool]:
"""
Specifies whether the Virtual Machine Scale Set should be overprovisioned.
"""
return pulumi.get(self, "overprovision")
@property
@pulumi.getter
def plan(self) -> Optional['outputs.PlanResponse']:
"""
Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="platformFaultDomainCount")
def platform_fault_domain_count(self) -> Optional[int]:
"""
Fault Domain count for each placement group.
"""
return pulumi.get(self, "platform_fault_domain_count")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> Optional['outputs.SubResourceResponse']:
"""
Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
"""
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter(name="scaleInPolicy")
def scale_in_policy(self) -> Optional['outputs.ScaleInPolicyResponse']:
"""
Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.
"""
return pulumi.get(self, "scale_in_policy")
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[bool]:
"""
When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
"""
return pulumi.get(self, "single_placement_group")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The virtual machine scale set sku.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueId")
def unique_id(self) -> str:
"""
Specifies the ID which uniquely identifies a Virtual Machine Scale Set.
"""
return pulumi.get(self, "unique_id")
@property
@pulumi.getter(name="upgradePolicy")
def upgrade_policy(self) -> Optional['outputs.UpgradePolicyResponse']:
"""
The upgrade policy.
"""
return pulumi.get(self, "upgrade_policy")
@property
@pulumi.getter(name="virtualMachineProfile")
def virtual_machine_profile(self) -> Optional['outputs.VirtualMachineScaleSetVMProfileResponse']:
"""
The virtual machine profile.
"""
return pulumi.get(self, "virtual_machine_profile")
@property
@pulumi.getter(name="zoneBalance")
def zone_balance(self) -> Optional[bool]:
"""
Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage.
"""
return pulumi.get(self, "zone_balance")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set
"""
return pulumi.get(self, "zones")
class AwaitableGetVirtualMachineScaleSetResult(GetVirtualMachineScaleSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualMachineScaleSetResult(
additional_capabilities=self.additional_capabilities,
automatic_repairs_policy=self.automatic_repairs_policy,
do_not_run_extensions_on_overprovisioned_vms=self.do_not_run_extensions_on_overprovisioned_vms,
extended_location=self.extended_location,
host_group=self.host_group,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
orchestration_mode=self.orchestration_mode,
overprovision=self.overprovision,
plan=self.plan,
platform_fault_domain_count=self.platform_fault_domain_count,
provisioning_state=self.provisioning_state,
proximity_placement_group=self.proximity_placement_group,
scale_in_policy=self.scale_in_policy,
single_placement_group=self.single_placement_group,
sku=self.sku,
tags=self.tags,
type=self.type,
unique_id=self.unique_id,
upgrade_policy=self.upgrade_policy,
virtual_machine_profile=self.virtual_machine_profile,
zone_balance=self.zone_balance,
zones=self.zones)
def get_virtual_machine_scale_set(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
vm_scale_set_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualMachineScaleSetResult:
"""
Describes a Virtual Machine Scale Set.
API Version: 2021-03-01.
:param str expand: The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was provided by the user during the VM scale set Create/Update operation
:param str resource_group_name: The name of the resource group.
:param str vm_scale_set_name: The name of the VM scale set.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['vmScaleSetName'] = vm_scale_set_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute:getVirtualMachineScaleSet', __args__, opts=opts, typ=GetVirtualMachineScaleSetResult).value
return AwaitableGetVirtualMachineScaleSetResult(
additional_capabilities=__ret__.additional_capabilities,
automatic_repairs_policy=__ret__.automatic_repairs_policy,
do_not_run_extensions_on_overprovisioned_vms=__ret__.do_not_run_extensions_on_overprovisioned_vms,
extended_location=__ret__.extended_location,
host_group=__ret__.host_group,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
orchestration_mode=__ret__.orchestration_mode,
overprovision=__ret__.overprovision,
plan=__ret__.plan,
platform_fault_domain_count=__ret__.platform_fault_domain_count,
provisioning_state=__ret__.provisioning_state,
proximity_placement_group=__ret__.proximity_placement_group,
scale_in_policy=__ret__.scale_in_policy,
single_placement_group=__ret__.single_placement_group,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
unique_id=__ret__.unique_id,
upgrade_policy=__ret__.upgrade_policy,
virtual_machine_profile=__ret__.virtual_machine_profile,
zone_balance=__ret__.zone_balance,
zones=__ret__.zones)
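# Example usage (a sketch; the resource group and scale set names below are
# placeholders, not values taken from this module):
#
#     import pulumi
#     import pulumi_azure_native.compute as compute
#
#     vmss = compute.get_virtual_machine_scale_set(
#         resource_group_name="my-rg",
#         vm_scale_set_name="my-vmss")
#     pulumi.export("vmss_unique_id", vmss.unique_id)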
| sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | 17,549 | Describes a Virtual Machine Scale Set.
Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
Policy for automatic repairs.
When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
The extended location of the Virtual Machine Scale Set.
Describes a Virtual Machine Scale Set.
API Version: 2021-03-01.
:param str expand: The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was provided by the user during the VM scale set Create/Update operation
:param str resource_group_name: The name of the resource group.
:param str vm_scale_set_name: The name of the VM scale set.
Specifies information about the dedicated host group that the virtual machine scale set resides in. <br><br>Minimum api-version: 2020-06-01.
Resource Id
The identity of the virtual machine scale set, if configured.
Resource location
Resource name
Specifies the orchestration mode for the virtual machine scale set.
Specifies whether the Virtual Machine Scale Set should be overprovisioned.
Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
Fault Domain count for each placement group.
The provisioning state, which only appears in the response.
Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.
When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
The virtual machine scale set sku.
Resource tags
Resource type
Specifies the ID which uniquely identifies a Virtual Machine Scale Set.
The upgrade policy.
The virtual machine profile.
Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage.
The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test | 3,046 | en | 0.782946 |
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from unittest import mock
from nesta.packages.geo_utils.geocode import geocode
from nesta.packages.geo_utils.geocode import _geocode
from nesta.packages.geo_utils.geocode import geocode_dataframe
from nesta.packages.geo_utils.geocode import geocode_batch_dataframe
from nesta.packages.geo_utils.geocode import generate_composite_key
from nesta.packages.geo_utils.country_iso_code import country_iso_code
from nesta.packages.geo_utils.country_iso_code import country_iso_code_dataframe
from nesta.packages.geo_utils.country_iso_code import country_iso_code_to_name
from nesta.packages.geo_utils.lookup import get_continent_lookup
from nesta.packages.geo_utils.lookup import get_country_region_lookup
from nesta.packages.geo_utils.lookup import get_country_continent_lookup
REQUESTS = 'nesta.packages.geo_utils.geocode.requests.get'
PYCOUNTRY = 'nesta.packages.geo_utils.country_iso_code.pycountry.countries.get'
GEOCODE = 'nesta.packages.geo_utils.geocode.geocode'
_GEOCODE = 'nesta.packages.geo_utils.geocode._geocode'
COUNTRY_ISO_CODE = 'nesta.packages.geo_utils.country_iso_code.country_iso_code'
class TestGeocoding():
@staticmethod
@pytest.fixture
def mocked_osm_response():
mocked_response = mock.Mock()
mocked_response.json.return_value = [{'lat': '12.923432', 'lon': '-75.234569'}]
return mocked_response
def test_error_raised_when_arguments_missing(self):
with pytest.raises(ValueError) as e:
geocode()
assert "No geocode match" in str(e.value)
@mock.patch(REQUESTS)
def test_request_includes_user_agent_in_header(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
geocode(something='a')
assert mocked_request.call_args[1]['headers'] == {'User-Agent': 'Nesta health data geocode'}
@mock.patch(REQUESTS)
def test_url_correct_with_city_and_country(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
kwargs = dict(city='london', country='UK')
geocode(**kwargs)
assert mocked_request.call_args[1]['params'] == dict(format="json", **kwargs)
@mock.patch(REQUESTS)
def test_url_correct_with_query(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
kwargs = dict(q='my place')
geocode(**kwargs)
assert mocked_request.call_args[1]['params'] == dict(format="json", **kwargs)
@mock.patch(REQUESTS)
def test_error_returned_if_no_match(self, mocked_request):
mocked_response = mock.Mock()
mocked_response.json.return_value = []
mocked_request.return_value = mocked_response
with pytest.raises(ValueError) as e:
geocode(q="Something bad")
assert "No geocode match" in str(e.value)
@mock.patch(REQUESTS)
def test_coordinates_extracted_from_json_with_one_result(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
assert geocode(q='somewhere') == [{'lat': '12.923432', 'lon': '-75.234569'}]
@mock.patch(GEOCODE)
def test_geocode_wrapper_rejects_invalid_query_parameters(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode(cat='dog', city='Nice')
assert "Invalid query parameter" in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_rejects_both_q_and_kwargs_supplied(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode(city='London', q='somewhere')
assert "Supply either q OR other query parameters, they cannot be combined." in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_errors_if_no_query_parameters_supplied(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode()
assert "No query parameters supplied" in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_calls_geocode_properly(self, mocked_geocode):
mocked_geocode.return_value = [{'lat': 1.1, 'lon': 2.2}]
_geocode('my place')
_geocode(q='somewhere')
_geocode(city='London', country='UK')
_geocode(postalcode='ABC 123')
expected_calls = [mock.call(q='my place'),
mock.call(q='somewhere'),
mock.call(city='London', country='UK'),
mock.call(postalcode='ABC 123')
]
assert mocked_geocode.mock_calls == expected_calls
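# For orientation, a minimal sketch of the behaviour these mocks assume the
# real `geocode` helper has: an OSM/Nominatim lookup whose header and params
# are inferred from the assertions above (the URL itself is an assumption,
# not copied from the implementation).
def _geocode_sketch(**kwargs):
    import requests
    response = requests.get('https://nominatim.openstreetmap.org/search',
                            headers={'User-Agent': 'Nesta health data geocode'},
                            params=dict(format='json', **kwargs))
    geo_data = response.json()
    if not geo_data:
        raise ValueError(f"No geocode match for {kwargs}")
    return geo_data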
class TestGeocodeDataFrame():
@staticmethod
@pytest.fixture
def test_dataframe():
df = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
})
return df
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_city_country(self, mocked_geocode,
test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = ['cat', 'dog', 'squirrel']
geocoded_dataframe = geocode_dataframe(test_dataframe)
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'coordinates': ['cat', 'dog', 'squirrel']
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium')]
# Check expected behaviours
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_fallback(self, mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [None, None, None, 'dog', 'cat', 'squirrel']
geocoded_dataframe = geocode_dataframe(test_dataframe)
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'coordinates': ['dog', 'cat', 'squirrel']
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium'),
mock.call('London UK'),
mock.call('Sheffield United Kingdom'),
mock.call('Brussels Belgium')]
# Check expected behaviours
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_duplicates_are_only_geocoded_once(self, mocked_geocode):
test_dataframe = pd.DataFrame({'index': [0, 1, 2, 3],
'city': ['London', 'Brussels', 'London', 'Brussels'],
'country': ['UK', 'Belgium', 'UK', 'Belgium']
})
mocked_geocode.side_effect = ['LON', 'BRU']
geocoded_dataframe = geocode_dataframe(test_dataframe)
expected_dataframe = pd.DataFrame({'index': [0, 1, 2, 3],
'city': ['London', 'Brussels', 'London', 'Brussels'],
'country': ['UK', 'Belgium', 'UK', 'Belgium'],
'coordinates': ['LON', 'BRU', 'LON', 'BRU']
})
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.call_count == 2
class TestGeocodeBatchDataframe():
@staticmethod
@pytest.fixture
def test_dataframe():
df = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
})
return df
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_city_country(self, mocked_geocode,
test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = [{'lat': '12.923432', 'lon': '-75.234569'},
{'lat': '99.999999', 'lon': '-88.888888'},
{'lat': '-2.202022', 'lon': '0.000000'}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [12.923432, 99.999999, -2.202022],
'longitude': [-75.234569, -88.888888, 0.0]
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe)
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_fallback(self,
mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [None,
{'lat': 1, 'lon': 4},
None,
{'lat': 2, 'lon': 5},
None,
{'lat': 3, 'lon': 6}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [1.0, 2.0, 3.0],
'longitude': [4.0, 5.0, 6.0],
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(q='London UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(q='Sheffield United Kingdom'),
mock.call(city='Brussels', country='Belgium'),
mock.call(q='Brussels Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe, query_method='both')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_method_only(self,
mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [{'lat': 1, 'lon': 4},
{'lat': 2, 'lon': 5},
{'lat': 3, 'lon': 6}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [1.0, 2.0, 3.0],
'longitude': [4.0, 5.0, 6.0],
})
expected_calls = [mock.call(q='London UK'),
mock.call(q='Sheffield United Kingdom'),
mock.call(q='Brussels Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe, query_method='query_only')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_valueerror_raised_when_invalid_query_method_passed(self,
mocked_geocode,
test_dataframe):
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method='cats')
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method='test')
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method=1)
@mock.patch(_GEOCODE)
def test_output_column_names_are_applied(self, mocked_geocode, test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = [{'lat': '12.923432', 'lon': '-75.234569'},
{'lat': '99.999999', 'lon': '-88.888888'},
{'lat': '-2.202022', 'lon': '0.000000'}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'lat': [12.923432, 99.999999, -2.202022],
'lon': [-75.234569, -88.888888, 0.0]
})
geocoded_dataframe = geocode_batch_dataframe(test_dataframe,
latitude='lat',
longitude='lon')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
class TestCountryIsoCode():
@mock.patch(PYCOUNTRY)
def test_lookup_via_name(self, mocked_pycountry):
mocked_pycountry.return_value = 'country_object'
expected_calls = [mock.call(name='United Kingdom')]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 1
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_lookup_via_common_name(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), 'country_object']
expected_calls = [mock.call(name='United Kingdom'),
mock.call(common_name='United Kingdom')
]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 2
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_lookup_via_official_name(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), KeyError(), 'country_object']
expected_calls = [mock.call(name='United Kingdom'),
mock.call(common_name='United Kingdom'),
mock.call(official_name='United Kingdom')
]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 3
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_invalid_lookup_raises_keyerror(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), KeyError(), KeyError()]*2
with pytest.raises(KeyError) as e:
country_iso_code('Fake Country')
assert 'Fake Country not found' in str(e.value)
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_title_case_is_applied(self, mocked_pycountry):
expected_calls = []
names = ['united kingdom', 'UNITED KINGDOM',
'United kingdom']
mocked_pycountry.side_effect = [KeyError(), KeyError(), KeyError(), 'blah'] * len(names)
for name in names:
country_iso_code(name) # Find the iso codes
raw_call = mock.call(name=name)
common_call = mock.call(common_name=name)
official_call = mock.call(official_name=name)
title_call = mock.call(name='United Kingdom')
expected_calls.append(raw_call) # The initial call
expected_calls.append(common_call) # Tries common name call
expected_calls.append(official_call) # Tries official name
expected_calls.append(title_call) # The title case call
assert mocked_pycountry.mock_calls == expected_calls
country_iso_code.cache_clear()
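# Sketch of the lookup order the expected_calls above encode: name, then
# common_name, then official_name, then one title-cased retry. This mirrors
# what the mocks assume (including pycountry raising KeyError on a miss); it
# is not copied from the real implementation.
def _country_iso_code_sketch(country, _retry=True):
    import pycountry
    for field in ('name', 'common_name', 'official_name'):
        try:
            return pycountry.countries.get(**{field: country})
        except KeyError:
            continue
    if _retry and country != country.title():
        return _country_iso_code_sketch(country.title(), _retry=False)
    raise KeyError(f"{country} not found")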
class TestCountryIsoCodeDataframe():
@staticmethod
def _mocked_response(alpha_2, alpha_3, numeric, continent):
'''Builds a mocked response for the patched country_iso_code function.'''
response = mock.Mock()
response.alpha_2 = alpha_2
response.alpha_3 = alpha_3
response.numeric = numeric
response.continent = continent
return response
@mock.patch(COUNTRY_ISO_CODE)
def test_valid_countries_coded(self, mocked_country_iso_code):
test_df = pd.DataFrame({'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States']
})
mocked_response_uk = self._mocked_response('GB', 'GBR', '123', 'EU')
mocked_response_be = self._mocked_response('BE', 'BEL', '875', 'EU')
mocked_response_us = self._mocked_response('US', 'USA', '014', 'NA')
mocked_country_iso_code.side_effect = [mocked_response_uk,
mocked_response_be,
mocked_response_us
]
expected_dataframe = pd.DataFrame(
{'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States'],
'country_alpha_2': ['GB', 'BE', 'US'],
'country_alpha_3': ['GBR', 'BEL', 'USA'],
'country_numeric': ['123', '875', '014'],
'continent': ['EU', 'EU', 'NA']
})
coded_df = country_iso_code_dataframe(test_df)
assert coded_df.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
@mock.patch(COUNTRY_ISO_CODE)
def test_invalid_countries_data_is_none(self, mocked_country_iso_code):
test_df = pd.DataFrame({'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States']
})
mocked_country_iso_code.side_effect = KeyError
expected_dataframe = pd.DataFrame(
{'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States'],
'country_alpha_2': [None, None, None],
'country_alpha_3': [None, None, None],
'country_numeric': [None, None, None],
'continent': [None, None, None]
})
coded_df = country_iso_code_dataframe(test_df)
assert coded_df.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
class TestCountryIsoCodeToName():
def test_valid_iso_code_returns_name(self):
assert country_iso_code_to_name('ITA') == 'Italy'
assert country_iso_code_to_name('DEU') == 'Germany'
assert country_iso_code_to_name('GBR') == 'United Kingdom'
def test_invalid_iso_code_returns_none(self):
assert country_iso_code_to_name('FOO') is None
assert country_iso_code_to_name('ABC') is None
assert country_iso_code_to_name('ZZZ') is None
def test_generate_composite_key():
assert generate_composite_key('London', 'United Kingdom') == 'london_united-kingdom'
assert generate_composite_key('Paris', 'France') == 'paris_france'
assert generate_composite_key('Name-with hyphen', 'COUNTRY') == 'name-with-hyphen_country'
def test_generate_composite_key_raises_error_with_invalid_input():
with pytest.raises(ValueError):
generate_composite_key(None, 'UK')
with pytest.raises(ValueError):
generate_composite_key('city_only')
with pytest.raises(ValueError):
generate_composite_key(1, 2)
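# The behaviour the assertions above pin down, as a sketch (assumed, not the
# real implementation): both parts must be strings, each is lower-cased with
# spaces turned into hyphens, and the two are joined with an underscore.
def _generate_composite_key_sketch(city=None, country=None):
    if not (isinstance(city, str) and isinstance(country, str)):
        raise ValueError("Both city and country must be supplied as strings")
    return '_'.join(part.replace(' ', '-').lower() for part in (city, country))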
def test_get_continent_lookup():
continents = get_continent_lookup()
assert None in continents
assert '' in continents
assert continents['NA'] == 'North America'
assert len(continents) == 9 # 2 nulls + 7 continents
def test_get_country_region_lookup():
countries = get_country_region_lookup()
assert len(countries) > 100
assert len(countries) < 1000
assert all(len(k) == 2 for k in countries.keys())
assert all(type(v) is tuple for v in countries.values())
assert all(len(v) == 2 for v in countries.values())
all_regions = {v[1] for v in countries.values()}
assert len(all_regions) == 18
def test_country_continent_lookup():
lookup = get_country_continent_lookup()
non_nulls = {k: v for k, v in lookup.items()
if k is not None and k != ''}
# All iso2, so length == 2
    assert all(len(k) == 2 for k in non_nulls)
assert all(len(v) == 2 for v in non_nulls.values())
# Either strings or Nones
country_types = set(type(v) for v in lookup.values())
assert country_types == {str, type(None)}
# Right ball-park of country and continent numbers
assert len(non_nulls) > 100 # num countries
assert len(non_nulls) < 1000 # num countries
assert len(set(non_nulls.values())) == 7 # num continents
| nesta/packages/geo_utils/tests/test_geotools.py | 23,712 | Builds a mocked response for the patched country_iso_code function.
Generate dataframe using a mocked output Expected outputs Check expected behaviours Expected outputs Check expected behaviours Generate dataframe using a mocked output Expected outputs Check expected behaviours Expected outputs Check expected behaviours Expected outputs Check expected behaviours Generate dataframe using a mocked output Expected outputs Check expected behaviours Find the iso codes The initial call Tries common name call Tries official name The title case call 2 nulls + 7 continents All iso2, so length == 2 Either strings or Nones Right ball-park of country and continent numbers num countries num countries num continents | 713 | en | 0.652405 |
"""
This module is for testing the distributions. Tests should focus on ensuring we can
expand distributions without missing emails or getting too many or running into infinite
loops.
"""
from django.test import TestCase
from ..models import EmailAddress, Distribution
class DistributionTestCase(TestCase):
def setUp(self):
self.test1 = EmailAddress.objects.create(email_address="test1@example.org")
self.test2 = EmailAddress.objects.create(email_address="test2@example.org")
self.all_emails = set([self.test1, self.test2])
self.disti = Distribution.objects.create(name="Test Disti")
self.disti.email_addresses.add(self.test1, self.test2)
# build disti with duplicates
self.dupe_disti = Distribution.objects.create(name="Dupe Disti")
self.dupe_disti.email_addresses.add(self.test1, self.test2)
self.dupe_disti.distributions.add(self.disti)
# build disti with self reference
self.self_disti = Distribution.objects.create(name="Self Disti")
self.self_disti.email_addresses.add(self.test1)
self.self_disti.distributions.add(self.self_disti)
# build disti with cyclic reference
self.cyclic_disti1 = Distribution.objects.create(name="Cyclic Disti 1")
self.cyclic_disti1.email_addresses.add(self.test1)
self.cyclic_disti2 = Distribution.objects.create(name="Cyclic Disti 2")
self.cyclic_disti2.email_addresses.add(self.test2)
self.cyclic_disti1.distributions.add(self.cyclic_disti2)
self.cyclic_disti2.distributions.add(self.cyclic_disti1)
def test_constructor_properties(self):
self.assertEqual(self.disti.name, "Test Disti")
emails = self.disti.email_addresses.all()
self.assertIn(self.test1, emails)
self.assertIn(self.test2, emails)
def test_collect_distribution(self):
"""
Test that emails are collected properly.
"""
test_emails = self.disti.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
def test_collect_distribution_with_duplicates(self):
"""
        Test that a distribution with duplicates only collects each email
        once.
"""
test_emails = self.dupe_disti.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
def test_collect_distribution_with_self_references(self):
"""
        Test that a distribution with self references only collects each
        email once, without looping infinitely.
"""
test_emails = self.self_disti.collect_email_addresses()
self.assertEqual(len(test_emails), 1)
self.assertSetEqual(set([self.test1]), set(test_emails))
def test_collect_distribution_with_cyclic_references(self):
"""
Test that a distribution with cyclic references only collects each email once,
and without looping infinitely.
"""
test_emails = self.cyclic_disti1.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
test_emails = self.cyclic_disti2.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
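# A sketch of the cycle-safe traversal these tests imply for
# Distribution.collect_email_addresses (assumed, not copied from the model):
# nested distributions are walked with a `seen` set, so duplicates, self
# references and cycles are each expanded at most once.
def _collect_email_addresses_sketch(distribution, _seen=None):
    _seen = _seen if _seen is not None else set()
    if distribution.pk in _seen:
        return set()
    _seen.add(distribution.pk)
    emails = set(distribution.email_addresses.all())
    for nested in distribution.distributions.all():
        emails.update(_collect_email_addresses_sketch(nested, _seen))
    return emails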
| impression/tests/test_distribution.py | 3,447 | Test that emails are collected properly.
Test that a distribution with cyclic references only collects each email once,
and without looping infinitely.
Test that a distribution with duplicates to ensure it only collects each email
once.
Test that a distribution with self references to ensure it only collects each
email once, and without looping infinitely.
This module is for testing the distributions. Tests should focus on ensuring we can
expand distributions without missing emails or getting too many or running into infinite
loops.
build disti with duplicates build disti with self reference build disti with cyclic reference | 634 | en | 0.902668 |
import bpy
from . import node_functions
from . import material_functions
from . import constants
import mathutils
def update_selected_image(self, context):
sel_texture = bpy.data.images[self.texture_index]
show_image_in_image_editor(sel_texture)
def show_image_in_image_editor(image):
for area in bpy.context.screen.areas:
if area.type == 'IMAGE_EDITOR':
area.spaces.active.image = image
def switch_baked_material(show_bake_material, affect):
current_bake_type = bpy.context.scene.bake_settings.get_current_bake_type()
material_name_suffix = constants.Material_Suffix.bake_type_mat_suffix[current_bake_type]
# on what object to work
if affect == 'active':
objects = [bpy.context.active_object]
elif affect == 'selected':
objects = bpy.context.selected_editable_objects
elif affect == 'visible':
objects = [ob for ob in bpy.context.view_layer.objects if ob.visible_get()]
elif affect == 'scene':
objects = bpy.context.scene.objects
all_mats = bpy.data.materials
baked_mats = [mat for mat in all_mats if material_name_suffix in mat.name]
for obj in objects:
if current_bake_type != "pbr":
            baked_ao_flag = getattr(obj, "ao_map_name") != '' or getattr(obj, "lightmap_name") != ''
if not baked_ao_flag:
continue
for slot in obj.material_slots:
if show_bake_material:
for baked_mat in baked_mats:
if baked_mat.name == slot.material.name + material_name_suffix + obj.bake_version:
slot.material = baked_mat
else:
if (material_name_suffix in slot.material.name):
bake_material = slot.material
index = bake_material.name.find(material_name_suffix)
org_mat = all_mats.get(bake_material.name[0:index])
if org_mat is not None:
slot.material = org_mat
def preview_bake_texture(self, context):
context = bpy.context
bake_settings = context.scene.bake_settings
preview_bake_texture = context.scene.texture_settings.preview_bake_texture
vis_mats = material_functions.get_all_visible_materials()
for mat in vis_mats:
if not mat.node_tree:
continue
nodes = mat.node_tree.nodes
bake_texture_node = None
if bake_settings.lightmap_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_lightmap)
elif bake_settings.ao_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_ao)
if bake_texture_node is not None:
if preview_bake_texture:
node_functions.emission_setup(mat, bake_texture_node.outputs["Color"])
else:
pbr_node = node_functions.get_nodes_by_type(nodes, constants.Node_Types.pbr_node)
if len(pbr_node) == 0:
return
pbr_node = pbr_node[0]
node_functions.remove_node(mat, "Emission Bake")
node_functions.reconnect_PBR(mat, pbr_node)
def preview_lightmap(self, context):
preview_lightmap = context.scene.texture_settings.preview_lightmap
vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
pbr_node = node_functions.get_pbr_node(material)
if pbr_node is None:
print("\n " + material.name + " has no PBR Node \n")
continue
base_color_input = node_functions.get_pbr_inputs(pbr_node)["base_color_input"]
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if preview_lightmap:
# add mix node
mix_node_name = "Mulitply Lightmap"
mix_node = node_functions.add_node(material,constants.Shader_Node_Types.mix, mix_node_name)
mix_node.blend_type = 'MULTIPLY'
mix_node.inputs[0].default_value = 1 # set factor to 1
pos_offset = mathutils.Vector((-200, 200))
mix_node.location = pbr_node.location + pos_offset
mix_node_input1 = mix_node.inputs["Color1"]
mix_node_input2 = mix_node.inputs["Color2"]
mix_node_output = mix_node.outputs["Color"]
# image texture in base color
if base_color_input.is_linked:
node_before_base_color = base_color_input.links[0].from_node
if not node_before_base_color.name == mix_node_name:
node_functions.make_link(material, node_before_base_color.outputs["Color"], mix_node_input1)
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
            else:
mix_node_input1.default_value = base_color_input.default_value
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
node_functions.remove_link(material,lightmap_output,emission_input)
if not preview_lightmap:
# remove mix and reconnect base color
mix_node = nodes.get("Mulitply Lightmap")
if mix_node is not None:
color_input_connections = len(mix_node.inputs["Color1"].links)
if (color_input_connections == 0):
node_functions.remove_node(material,mix_node.name)
else:
node_functions.remove_reconnect_node(material,mix_node.name)
node_functions.link_pbr_to_output(material,pbr_node)
def lightmap_to_emission(self, context, connect):
vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
pbr_node = node_functions.get_pbr_node(material)
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if connect:
node_functions.make_link(material, lightmap_output, emission_input)
else:
node_functions.remove_link(material,lightmap_output,emission_input)
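# How these update callbacks are typically wired to properties (a sketch; the
# property group and attribute names are illustrative, not taken from this
# add-on's registration code):
#
#     class TextureSettings(bpy.types.PropertyGroup):
#         preview_lightmap: bpy.props.BoolProperty(
#             name="Preview Lightmap", default=False,
#             update=preview_lightmap)  # Blender passes (self, context)
#         preview_bake_texture: bpy.props.BoolProperty(
#             name="Preview Bake Texture", default=False,
#             update=preview_bake_texture)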
| Functions/visibility_functions.py | 7,196 | on what object to work add mix node set factor to 1 image texture in base color remove mix and reconnect base color | 115 | en | 0.741491 |
#!/usr/bin/env python
import sys
import re
def setup_python3():
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join
tmp_src = join("build", "src")
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
kwargs = {}
if sys.version_info[0] >= 3:
from setuptools import setup
kwargs['use_2to3'] = True
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
kwargs['src_root'] = setup_python3()
else:
try:
from setuptools import setup
kwargs['test_suite'] = "nose.collector"
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
except ImportError:
from distutils.core import setup
# Find version. We have to do this because we can't import it in Python 3 until
# it's been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
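# For example, given a line `__version__ = "1.2.3"` in pyRdfa/__init__.py,
# find_version returns "1.2.3".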
version = find_version('pyRdfa/__init__.py')
setup(
name = 'pyRdfa',
version = version,
description = "",
author = "",
author_email = "",
maintainer = "",
maintainer_email = "",
url = "",
license = "LICENSE",
platforms = ["any"],
classifiers = ["Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Natural Language :: English",
],
long_description = \
"""
""",
download_url = "%s.tar.gz" % version,
packages = ['pyRdfa',
'pyRdfa/host',
'pyRdfa/rdfs',
'pyRdfa/serializers',
'pyRdfa/transform',
],
**kwargs
)
| setup.py | 2,926 | !/usr/bin/env python Taken from "distribute" setup.py arrange setup to use the copy Find version. We have to do this because we can't import it in Python 3 until its been automatically converted in the setup process. | 216 | en | 0.902633 |
import datetime
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext as _
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from rest_framework import serializers
from readthedocs.builds.models import Build, Version
from readthedocs.core.utils import slugify
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.oauth.models import RemoteOrganization, RemoteRepository
from readthedocs.organizations.models import Organization, Team
from readthedocs.projects.constants import (
LANGUAGES,
PROGRAMMING_LANGUAGES,
REPO_CHOICES,
)
from readthedocs.projects.models import (
EnvironmentVariable,
Project,
ProjectRelationship,
)
from readthedocs.redirects.models import TYPE_CHOICES as REDIRECT_TYPE_CHOICES
from readthedocs.redirects.models import Redirect
class UserSerializer(FlexFieldsModelSerializer):
class Meta:
model = User
fields = [
'username',
]
class BaseLinksSerializer(serializers.Serializer):
def _absolute_url(self, path):
scheme = 'http' if settings.DEBUG else 'https'
domain = settings.PRODUCTION_DOMAIN
return urllib.parse.urlunparse((scheme, domain, path, '', '', ''))
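    # Example (assuming DEBUG is off and PRODUCTION_DOMAIN is
    # 'readthedocs.org'; both are assumptions, not values read from settings):
    #   self._absolute_url('/api/v3/projects/pip/')
    #   -> 'https://readthedocs.org/api/v3/projects/pip/'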
class BuildCreateSerializer(serializers.ModelSerializer):
"""
Used when triggering (create action) a ``Build`` for a specific ``Version``.
This serializer validates that no field is sent at all in the request.
"""
class Meta:
model = Build
fields = []
class BuildLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-builds-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'build_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.version.slug,
},
)
return self._absolute_url(path)
return None
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class BuildURLsSerializer(BaseLinksSerializer, serializers.Serializer):
build = serializers.URLField(source='get_full_url')
project = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
def get_project(self, obj):
path = reverse(
'projects_detail',
kwargs={
'project_slug': obj.project.slug
}
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.version.slug
}
)
return self._absolute_url(path)
return None
class BuildConfigSerializer(FlexFieldsSerializerMixin, serializers.Serializer):
"""
Render ``Build.config`` property without modifying it.
.. note::
Any change on the output of that property will be reflected here,
which may produce incompatible changes in the API.
"""
def to_representation(self, instance): # pylint: disable=arguments-differ
# For now, we want to return the ``config`` object as it is without
# manipulating it.
return instance
class BuildStateSerializer(serializers.Serializer):
code = serializers.CharField(source='state')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.state.title()
class BuildSerializer(FlexFieldsModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
version = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='date')
finished = serializers.SerializerMethodField()
success = serializers.SerializerMethodField()
duration = serializers.IntegerField(source='length')
state = BuildStateSerializer(source='*')
_links = BuildLinksSerializer(source='*')
urls = BuildURLsSerializer(source='*')
class Meta:
model = Build
fields = [
'id',
'version',
'project',
'created',
'finished',
'duration',
'state',
'success',
'error',
'commit',
'_links',
'urls',
]
expandable_fields = {
'config': (BuildConfigSerializer,)
}
def get_finished(self, obj):
if obj.date and obj.length:
return obj.date + datetime.timedelta(seconds=obj.length)
def get_success(self, obj):
"""
Return ``None`` if the build is not finished.
        This is needed because the model field has ``default=True``.
"""
if obj.finished:
return obj.success
return None
class VersionLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-versions-builds-list',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'parent_lookup_version__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class VersionDashboardURLsSerializer(BaseLinksSerializer, serializers.Serializer):
edit = serializers.SerializerMethodField()
def get_edit(self, obj):
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.slug,
})
return self._absolute_url(path)
class VersionURLsSerializer(BaseLinksSerializer, serializers.Serializer):
documentation = serializers.SerializerMethodField()
vcs = serializers.URLField(source='vcs_url')
dashboard = VersionDashboardURLsSerializer(source='*')
def get_documentation(self, obj):
return obj.project.get_docs_url(version_slug=obj.slug,)
class VersionSerializer(FlexFieldsModelSerializer):
ref = serializers.CharField()
downloads = serializers.SerializerMethodField()
urls = VersionURLsSerializer(source='*')
_links = VersionLinksSerializer(source='*')
class Meta:
model = Version
fields = [
'id',
'slug',
'verbose_name',
'identifier',
'ref',
'built',
'active',
'hidden',
'type',
'downloads',
'urls',
'_links',
]
expandable_fields = {
'last_build': (
BuildSerializer,
)
}
def get_downloads(self, obj):
downloads = obj.get_downloads()
data = {}
for k, v in downloads.items():
if k in ('html', 'pdf', 'epub'):
# Keep backward compatibility
if k == 'html':
k = 'htmlzip'
data[k] = ('http:' if settings.DEBUG else 'https:') + v
return data
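    # Illustrative example (hypothetical URLs): {'html': '//assets.example.org/pip.zip',
    # 'pdf': '//assets.example.org/pip.pdf'} becomes
    # {'htmlzip': 'https://assets.example.org/pip.zip',
    #  'pdf': 'https://assets.example.org/pip.pdf'};
    # keys outside ('html', 'pdf', 'epub') are dropped.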
class VersionUpdateSerializer(serializers.ModelSerializer):
"""
Used when modifying (update action) a ``Version``.
    It only allows making the Version active or inactive.
"""
class Meta:
model = Version
fields = [
'active',
'hidden',
]
class LanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, language):
return language
def get_name(self, language):
for code, name in LANGUAGES:
if code == language:
return name
return 'Unknown'
class ProgrammingLanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, programming_language):
return programming_language
def get_name(self, programming_language):
for code, name in PROGRAMMING_LANGUAGES:
if code == programming_language:
return name
return 'Unknown'
class ProjectURLsSerializer(BaseLinksSerializer, serializers.Serializer):
"""Serializer with all the user-facing URLs under Read the Docs."""
documentation = serializers.CharField(source='get_docs_url')
home = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
def get_home(self, obj):
path = reverse('projects_detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse('builds_project_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse('project_version_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
class RepositorySerializer(serializers.Serializer):
url = serializers.CharField(source='repo')
type = serializers.ChoiceField(
source='repo_type',
choices=REPO_CHOICES,
)
class ProjectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
environmentvariables = serializers.SerializerMethodField()
redirects = serializers.SerializerMethodField()
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse('projects-detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse(
'projects-versions-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_environmentvariables(self, obj):
path = reverse(
'projects-environmentvariables-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_redirects(self, obj):
path = reverse(
'projects-redirects-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-builds-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_subprojects(self, obj):
path = reverse(
'projects-subprojects-list',
kwargs={
'parent_lookup_parent__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_superproject(self, obj):
path = reverse(
'projects-superproject',
kwargs={
'project_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_translations(self, obj):
path = reverse(
'projects-translations-list',
kwargs={
'parent_lookup_main_language_project__slug': obj.slug,
},
)
return self._absolute_url(path)
class ProjectCreateSerializerBase(FlexFieldsModelSerializer):
"""Serializer used to Import a Project."""
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(source='project_url', required=False)
class Meta:
model = Project
fields = (
'name',
'language',
'programming_language',
'repository',
'homepage',
)
def validate_name(self, value):
potential_slug = slugify(value)
if Project.objects.filter(slug=potential_slug).exists():
raise serializers.ValidationError(
_('Project with slug "{0}" already exists.').format(potential_slug),
)
return value
class ProjectCreateSerializer(SettingsOverrideObject):
_default_class = ProjectCreateSerializerBase
class ProjectUpdateSerializerBase(FlexFieldsModelSerializer):
"""Serializer used to modify a Project once imported."""
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(
source='project_url',
required=False,
)
class Meta:
model = Project
fields = (
# Settings
'name',
'repository',
'language',
'programming_language',
'homepage',
# Advanced Settings -> General Settings
'default_version',
'default_branch',
'analytics_code',
'analytics_disabled',
'show_version_warning',
'single_version',
'external_builds_enabled',
# NOTE: we do not allow to change any setting that can be set via
# the YAML config file.
)
class ProjectUpdateSerializer(SettingsOverrideObject):
_default_class = ProjectUpdateSerializerBase
class ProjectSerializer(FlexFieldsModelSerializer):
"""
Project serializer.
.. note::
When using organizations, projects don't have the concept of users.
But we have organization.users.
"""
homepage = serializers.SerializerMethodField()
language = LanguageSerializer()
programming_language = ProgrammingLanguageSerializer()
repository = RepositorySerializer(source='*')
urls = ProjectURLsSerializer(source='*')
subproject_of = serializers.SerializerMethodField()
translation_of = serializers.SerializerMethodField()
default_branch = serializers.CharField(source='get_default_branch')
tags = serializers.StringRelatedField(many=True)
if not settings.RTD_ALLOW_ORGANIZATIONS:
users = UserSerializer(many=True)
_links = ProjectLinksSerializer(source='*')
# TODO: adapt these fields with the proper names in the db and then remove
# them from here
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Project
fields = [
'id',
'name',
'slug',
'created',
'modified',
'language',
'programming_language',
'homepage',
'repository',
'default_version',
'default_branch',
'subproject_of',
'translation_of',
'urls',
'tags',
            # NOTE: ``expandable_fields`` must not be included here. Otherwise,
            # the serializer will try to render them and fail.
# 'users',
# 'active_versions',
'_links',
]
if not settings.RTD_ALLOW_ORGANIZATIONS:
fields.append('users')
expandable_fields = {
# NOTE: this has to be a Model method, can't be a
# ``SerializerMethodField`` as far as I know
'active_versions': (
VersionSerializer,
{
'many': True,
}
)
}
if settings.RTD_ALLOW_ORGANIZATIONS:
expandable_fields.update({
'organization': (
'readthedocs.api.v3.serializers.OrganizationSerializer',
# NOTE: we cannot have a Project with multiple organizations.
{'source': 'organizations.first'},
),
'teams': (
serializers.SlugRelatedField,
{
'slug_field': 'slug',
'many': True,
'read_only': True,
},
),
})
def get_homepage(self, obj):
# Overridden only to return ``None`` when the project_url is ``''``
return obj.project_url or None
def get_translation_of(self, obj):
if obj.main_language_project:
return self.__class__(obj.main_language_project).data
def get_subproject_of(self, obj):
try:
return self.__class__(obj.superprojects.first().parent).data
except Exception:
return None
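# Example (a minimal sketch): serializing a project, e.g. from a Django
# shell; ``project`` is a hypothetical Project instance.
#
#     data = ProjectSerializer(project).data
#     data['slug'], data['urls']['documentation'], data['_links']['_self']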
class SubprojectCreateSerializer(FlexFieldsModelSerializer):
"""Serializer used to define a Project as subproject of another Project."""
child = serializers.SlugRelatedField(
slug_field='slug',
queryset=Project.objects.none(),
)
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parent_project = self.context['parent']
user = self.context['request'].user
self.fields['child'].queryset = (
self.parent_project.get_subproject_candidates(user)
)
# Give users a better error message.
self.fields['child'].error_messages['does_not_exist'] = _(
'Project with {slug_name}={value} is not valid as subproject'
)
def validate_alias(self, value):
        # Check that a subproject with this alias does not already exist
subproject = self.parent_project.subprojects.filter(alias=value)
if subproject.exists():
raise serializers.ValidationError(
_('A subproject with this alias already exists'),
)
return value
# pylint: disable=arguments-differ
def validate(self, data):
self.parent_project.is_valid_as_superproject(
serializers.ValidationError
)
return data
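# Example (a minimal sketch): the serializer requires both the parent project
# and the request in its context, as read in ``__init__`` above;
# ``parent_project`` and ``request`` are hypothetical objects from the view.
#
#     serializer = SubprojectCreateSerializer(
#         data={'child': 'child-slug', 'alias': 'docs'},
#         context={'parent': parent_project, 'request': request},
#     )
#     serializer.is_valid(raise_exception=True)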
class SubprojectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
parent = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-subprojects-detail',
kwargs={
'parent_lookup_parent__slug': obj.parent.slug,
'alias_slug': obj.alias,
},
)
return self._absolute_url(path)
def get_parent(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.parent.slug,
},
)
return self._absolute_url(path)
class ChildProjectSerializer(ProjectSerializer):
"""
Serializer to render a Project when listed under ProjectRelationship.
    It's the same as ``ProjectSerializer``, minus the ``subproject_of`` field.
"""
class Meta(ProjectSerializer.Meta):
fields = [
field for field in ProjectSerializer.Meta.fields
if field not in ['subproject_of']
]
class SubprojectSerializer(FlexFieldsModelSerializer):
"""Serializer to render a subproject (``ProjectRelationship``)."""
child = ChildProjectSerializer()
_links = SubprojectLinksSerializer(source='*')
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
'_links',
]
class SubprojectDestroySerializer(FlexFieldsModelSerializer):
"""Serializer used to remove a subproject relationship to a Project."""
class Meta:
model = ProjectRelationship
fields = (
'alias',
)
class RedirectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-redirects-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'redirect_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class RedirectSerializerBase(serializers.ModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='create_dt', read_only=True)
modified = serializers.DateTimeField(source='update_dt', read_only=True)
_links = RedirectLinksSerializer(source='*', read_only=True)
type = serializers.ChoiceField(source='redirect_type', choices=REDIRECT_TYPE_CHOICES)
class Meta:
model = Redirect
fields = [
'pk',
'created',
'modified',
'project',
'type',
'from_url',
'to_url',
'_links',
]
class RedirectCreateSerializer(RedirectSerializerBase):
pass
class RedirectDetailSerializer(RedirectSerializerBase):
"""Override RedirectSerializerBase to sanitize the empty fields."""
from_url = serializers.SerializerMethodField()
to_url = serializers.SerializerMethodField()
def get_from_url(self, obj):
        # Overridden only to return ``None`` when the from_url is ``''``
return obj.from_url or None
def get_to_url(self, obj):
        # Overridden only to return ``None`` when the to_url is ``''``
return obj.to_url or None
class EnvironmentVariableLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-environmentvariables-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'environmentvariable_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class EnvironmentVariableSerializer(serializers.ModelSerializer):
value = serializers.CharField(write_only=True)
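    # ``value`` is write-only: environment variable values may hold secrets,
    # so the API accepts them on write but never returns them in responses.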
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
_links = EnvironmentVariableLinksSerializer(source='*', read_only=True)
class Meta:
model = EnvironmentVariable
fields = [
'pk',
'created',
'modified',
'name',
'value',
'public',
'project',
'_links',
]
class OrganizationLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
projects = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'organizations-detail',
kwargs={
'organization_slug': obj.slug,
})
return self._absolute_url(path)
def get_projects(self, obj):
path = reverse(
'organizations-projects-list',
kwargs={
'parent_lookup_organizations__slug': obj.slug,
},
)
return self._absolute_url(path)
class TeamSerializer(FlexFieldsModelSerializer):
# TODO: add ``projects`` as flex field when we have a
# /organizations/<slug>/teams/<slug>/projects endpoint
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Team
fields = (
'name',
'slug',
'created',
'modified',
'access',
)
expandable_fields = {
'members': (UserSerializer, {'many': True}),
}
class OrganizationSerializer(FlexFieldsModelSerializer):
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
owners = UserSerializer(many=True)
_links = OrganizationLinksSerializer(source='*')
class Meta:
model = Organization
fields = (
'name',
'description',
'url',
'slug',
'email',
'owners',
'created',
'modified',
'disabled',
'_links',
)
expandable_fields = {
'projects': (ProjectSerializer, {'many': True}),
'teams': (TeamSerializer, {'many': True}),
}
class RemoteOrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = RemoteOrganization
fields = [
'pk',
'slug',
'name',
'avatar_url',
'url',
'vcs_provider',
'created',
'modified',
]
read_only_fields = fields
class RemoteRepositorySerializer(FlexFieldsModelSerializer):
admin = serializers.SerializerMethodField('is_admin')
class Meta:
model = RemoteRepository
fields = [
'pk',
'name',
'full_name',
'description',
'admin',
'avatar_url',
'ssh_url',
'clone_url',
'html_url',
'vcs',
'vcs_provider',
'private',
'default_branch',
'created',
'modified',
]
read_only_fields = fields
expandable_fields = {
'remote_organization': (
RemoteOrganizationSerializer, {'source': 'organization'}
),
'projects': (
ProjectSerializer, {'many': True}
)
}
def is_admin(self, obj):
request = self.context['request']
# Use annotated value from RemoteRepositoryViewSet queryset
if hasattr(obj, '_admin'):
return obj._admin
return obj.remote_repository_relations.filter(
user=request.user, admin=True
).exists()
 | readthedocs/api/v3/serializers.py | 27,758 | … | 2,381 | en | 0.852163
# Copyright 2021 Edoardo Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Complexity: O(n log m) for a matrix of n sorted rows with m elements each
def search_in_sorted_matrix(A, x):
for S in A:
if binary_search(S, x):
return True
return False
def binary_search(A, x):
low = 0
high = len(A) - 1
mid = 0
while low <= high:
mid = (high + low) // 2
if A[mid] < x:
low = mid + 1
elif A[mid] > x:
high = mid - 1
else:
return True
return False
mat = [[1, 2, 3, 4, 5], [9, 10, 20, 32, 55]]
print(search_in_sorted_matrix(mat, 56))
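# Two more illustrative calls: a value present in the second row, and a value
# smaller than every element.
print(search_in_sorted_matrix(mat, 55))  # True
print(search_in_sorted_matrix(mat, 0))  # False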
 | exercises/search_in_sorted_matrix.py | 1,104 | … | 574 | en | 0.853956
"""Bokeh ELPDPlot."""
import warnings
import bokeh.plotting as bkp
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource
import bokeh.models.markers as mk
import numpy as np
from . import backend_kwarg_defaults
from .. import show_layout
from ...plot_utils import _scale_fig_size
from ....rcparams import rcParams, _validate_bokeh_marker
def plot_elpd(
ax,
models,
pointwise_data,
numvars,
figsize,
textsize,
plot_kwargs,
markersize,
xlabels,
coord_labels,
xdata,
threshold,
backend_kwargs,
show,
):
"""Bokeh elpd plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
if numvars == 2:
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 1, numvars - 1
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(**backend_kwargs)
ydata = pointwise_data[0] - pointwise_data[1]
_plot_atomic_elpd(
ax, xdata, ydata, *models, threshold, coord_labels, xlabels, True, True, plot_kwargs
)
show_layout(ax, show)
else:
max_plots = (
numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
)
vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
if vars_to_plot < numvars:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of resulting ELPD pairwise plots with these variables, generating only a "
"{side}x{side} grid".format(max_plots=max_plots, side=vars_to_plot),
UserWarning,
)
numvars = vars_to_plot
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 2, numvars - 2
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
ax = []
for row in range(numvars - 1):
ax_row = []
for col in range(numvars - 1):
if row == 0 and col == 0:
ax_first = bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
**backend_kwargs
)
ax_row.append(ax_first)
elif row < col:
ax_row.append(None)
else:
ax_row.append(
bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
x_range=ax_first.x_range,
y_range=ax_first.y_range,
**backend_kwargs
)
)
ax.append(ax_row)
ax = np.array(ax)
for i in range(0, numvars - 1):
var1 = pointwise_data[i]
for j in range(0, numvars - 1):
if j < i:
continue
var2 = pointwise_data[j + 1]
ydata = var1 - var2
_plot_atomic_elpd(
ax[j, i],
xdata,
ydata,
models[i],
models[j + 1],
threshold,
coord_labels,
xlabels,
j == numvars - 2,
i == 0,
plot_kwargs,
)
show_layout(ax, show)
return ax
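# Example (a minimal sketch of the public entry point that dispatches to this
# backend; ``idata_a`` and ``idata_b`` are hypothetical InferenceData objects
# carrying log-likelihood data):
#
#     import arviz as az
#     az.plot_elpd({"model_a": idata_a, "model_b": idata_b}, backend="bokeh")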
def _plot_atomic_elpd(
ax_,
xdata,
ydata,
model1,
model2,
threshold,
coord_labels,
xlabels,
xlabels_shown,
ylabels_shown,
plot_kwargs,
):
marker = _validate_bokeh_marker(plot_kwargs.get("marker"))
marker_func = getattr(mk, marker)
sizes = np.ones(len(xdata)) * plot_kwargs.get("s")
glyph = marker_func(
x="xdata", y="ydata", size="sizes", line_color=plot_kwargs.get("color", "black")
)
source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))
ax_.add_glyph(source, glyph)
if threshold is not None:
diff_abs = np.abs(ydata - ydata.mean())
bool_ary = diff_abs > threshold * ydata.std()
if coord_labels is None:
coord_labels = xdata.astype(str)
outliers = np.argwhere(bool_ary).squeeze()
for outlier in outliers:
label = coord_labels[outlier]
ax_.text(
x=np.asarray(outlier), y=np.asarray(ydata[outlier]), text=label, text_color="black",
)
if ylabels_shown:
ax_.yaxis.axis_label = "ELPD difference"
else:
ax_.yaxis.minor_tick_line_color = None
ax_.yaxis.major_label_text_font_size = "0pt"
if xlabels_shown:
if xlabels:
ax_.xaxis.ticker = np.arange(0, len(coord_labels))
ax_.xaxis.major_label_overrides = {
str(key): str(value)
for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))
}
else:
ax_.xaxis.minor_tick_line_color = None
ax_.xaxis.major_label_text_font_size = "0pt"
title = Title()
title.text = "{} - {}".format(model1, model2)
ax_.title = title
 | arviz/plots/backends/bokeh/elpdplot.py | 5,830 | … | 32 | en | 0.158796
# ============================================================================
# FILE: default.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
@property
def is_async(self) -> bool:
return self._is_async
def __init__(self, vim: Nvim) -> None:
self._vim = vim
self._denite: typing.Optional[SyncParent] = None
self._selected_candidates: typing.List[int] = []
self._candidates: Candidates = []
self._cursor = 0
self._entire_len = 0
self._result: typing.List[typing.Any] = []
self._context: UserContext = {}
self._bufnr = -1
self._winid = -1
self._winrestcmd = ''
self._initialized = False
self._winheight = 0
self._winwidth = 0
self._winminheight = -1
self._is_multi = False
self._is_async = False
self._matched_pattern = ''
self._displayed_texts: typing.List[str] = []
self._statusline_sources = ''
self._titlestring = ''
self._ruler = False
self._prev_action = ''
self._prev_status: typing.Dict[str, typing.Any] = {}
self._prev_curpos: typing.List[typing.Any] = []
self._save_window_options: typing.Dict[str, typing.Any] = {}
self._sources_history: typing.List[typing.Any] = []
self._previous_text = ''
self._floating = False
self._filter_floating = False
self._updated = False
self._timers: typing.Dict[str, int] = {}
self._matched_range_id = -1
self._matched_char_id = -1
self._check_matchdelete = bool(self._vim.call(
'denite#util#check_matchdelete'))
def start(self, sources: typing.List[typing.Any],
context: UserContext) -> typing.List[typing.Any]:
if not self._denite:
# if hasattr(self._vim, 'run_coroutine'):
# self._denite = ASyncParent(self._vim)
# else:
self._denite = SyncParent(self._vim)
self._result = []
context['sources_queue'] = [sources]
self._start_sources_queue(context)
return self._result
def do_action(self, action_name: str,
command: str = '', is_manual: bool = False) -> None:
if is_manual:
candidates = self._get_selected_candidates()
elif self._get_cursor_candidate():
candidates = [self._get_cursor_candidate()]
else:
candidates = []
if not self._denite or not candidates or not action_name:
return
self._prev_action = action_name
action = self._denite.get_action(
self._context, action_name, candidates)
if not action:
return
post_action = self._context['post_action']
is_quit = action['is_quit'] or post_action == 'quit'
if is_quit:
self.quit()
self._denite.do_action(self._context, action_name, candidates)
self._result = candidates
if command != '':
self._vim.command(command)
if is_quit and post_action == 'open':
# Re-open denite buffer
prev_cursor = self._cursor
cursor_candidate = self._get_cursor_candidate()
self._init_buffer()
self.redraw(False)
if cursor_candidate == self._get_candidate(prev_cursor):
# Restore the cursor
self._move_to_pos(prev_cursor)
# Disable quit flag
is_quit = False
if not is_quit and is_manual:
self._selected_candidates = []
self.redraw(action['is_redraw'])
if is_manual and self._context['sources_queue']:
self._context['input'] = ''
self._context['quick_move'] = ''
self._start_sources_queue(self._context)
return
def redraw(self, is_force: bool = True) -> None:
self._context['is_redraw'] = is_force
if is_force:
self._gather_candidates()
if self._update_candidates():
self._update_buffer()
else:
self._update_status()
self._context['is_redraw'] = False
def quit(self) -> None:
if self._denite:
self._denite.on_close(self._context)
self._quit_buffer()
self._result = []
return
def _restart(self) -> None:
self._context['input'] = ''
self._quit_buffer()
self._init_denite()
self._gather_candidates()
self._init_buffer()
self._update_candidates()
self._update_buffer()
def _start_sources_queue(self, context: UserContext) -> None:
if not context['sources_queue']:
return
self._sources_history.append({
'sources': context['sources_queue'][0],
'path': context['path'],
})
self._start(context['sources_queue'][0], context)
if context['sources_queue']:
context['sources_queue'].pop(0)
context['path'] = self._context['path']
def _start(self, sources: typing.List[typing.Any],
context: UserContext) -> None:
from denite.ui.map import do_map
self._vim.command('silent! autocmd! denite')
if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
# Ignore command line window.
return
resume = self._initialized and context['resume']
if resume:
# Skip the initialization
update = ('immediately', 'immediately_1',
'cursor_pos', 'prev_winid',
'start_filter', 'quick_move')
for key in update:
self._context[key] = context[key]
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
if context['refresh']:
self.redraw()
self._move_to_pos(self._cursor)
else:
if self._context != context:
self._context.clear()
self._context.update(context)
self._context['sources'] = sources
self._context['is_redraw'] = False
self._is_multi = len(sources) > 1
if not sources:
# Ignore empty sources.
error(self._vim, 'Empty sources')
return
self._init_denite()
self._gather_candidates()
self._update_candidates()
self._init_cursor()
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
self._update_displayed_texts()
self._update_buffer()
self._move_to_pos(self._cursor)
if self._context['quick_move'] and do_map(self, 'quick_move', []):
return
if self._context['start_filter']:
do_map(self, 'open_filter_buffer', [])
def _init_buffer(self) -> None:
self._prev_status = dict()
self._displayed_texts = []
self._prev_bufnr = self._vim.current.buffer.number
self._prev_curpos = self._vim.call('getcurpos')
self._prev_wininfo = self._get_wininfo()
self._prev_winid = int(self._context['prev_winid'])
self._winrestcmd = self._vim.call('winrestcmd')
self._ruler = self._vim.options['ruler']
self._switch_buffer()
self._bufnr = self._vim.current.buffer.number
self._winid = self._vim.call('win_getid')
self._resize_buffer(True)
self._winheight = self._vim.current.window.height
self._winwidth = self._vim.current.window.width
self._bufvars = self._vim.current.buffer.vars
self._bufvars['denite'] = {
'buffer_name': self._context['buffer_name'],
}
self._bufvars['denite_statusline'] = {}
self._vim.vars['denite#_previewed_buffers'] = {}
self._save_window_options = {}
window_options = {
'colorcolumn',
'concealcursor',
'conceallevel',
'cursorcolumn',
'cursorline',
'foldcolumn',
'foldenable',
'list',
'number',
'relativenumber',
'signcolumn',
'spell',
'winfixheight',
'wrap',
}
for k in window_options:
self._save_window_options[k] = self._vim.current.window.options[k]
# Note: Have to use setlocal instead of "current.window.options"
# "current.window.options" changes global value instead of local in
# neovim.
self._vim.command('setlocal colorcolumn=')
self._vim.command('setlocal conceallevel=3')
self._vim.command('setlocal concealcursor=inv')
self._vim.command('setlocal nocursorcolumn')
self._vim.command('setlocal nofoldenable')
self._vim.command('setlocal foldcolumn=0')
self._vim.command('setlocal nolist')
self._vim.command('setlocal nonumber')
self._vim.command('setlocal norelativenumber')
self._vim.command('setlocal nospell')
self._vim.command('setlocal winfixheight')
self._vim.command('setlocal nowrap')
self._vim.command('setlocal signcolumn=no')
if self._context['cursorline']:
self._vim.command('setlocal cursorline')
options = self._vim.current.buffer.options
if self._floating:
# Disable ruler
self._vim.options['ruler'] = False
options['buftype'] = 'nofile'
options['bufhidden'] = 'delete'
options['swapfile'] = False
options['buflisted'] = False
options['modeline'] = False
options['modifiable'] = False
options['filetype'] = 'denite'
if self._vim.call('exists', '#WinEnter'):
self._vim.command('doautocmd WinEnter')
if self._vim.call('exists', '#BufWinEnter'):
self._vim.command('doautocmd BufWinEnter')
if not self._vim.call('has', 'nvim'):
            # In Vim8, the FileType autocmd is not fired after setting the
            # filetype option.
if self._context['auto_action']:
self._vim.command('autocmd denite '
'CursorMoved <buffer> '
'call denite#call_map("auto_action")')
self._init_syntax()
def _switch_buffer(self) -> None:
split = self._context['split']
if (split != 'no' and self._winid > 0 and
self._vim.call('win_gotoid', self._winid)):
if split != 'vertical' and not self._floating:
# Move the window to bottom
self._vim.command('wincmd J')
self._winrestcmd = ''
return
self._floating = split in ['floating', 'floating_relative']
self._filter_floating = False
command = 'edit'
if split == 'tab':
self._vim.command('tabnew')
elif self._floating:
# Use floating window
if self._vim.current.buffer.options['filetype'] != 'denite':
self._titlestring = self._vim.options['titlestring']
if split == 'floating':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'editor',
'row': int(self._context['winrow']),
'col': int(self._context['wincol']),
'width': int(self._context['winwidth']),
'height': int(self._context['winheight']),
})
elif split == 'floating_relative':
opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
self._vim.call('winline') - 1)
if self._context['auto_resize']:
height = max(self._winheight, 1)
width = max(self._winwidth, 1)
else:
width = int(self._context['winwidth'])
height = int(self._context['winheight'])
if opened_pos + height + 3 > self._vim.eval('&lines'):
anchor = 'SW'
row = 0
self._context['filter_winrow'] = row + opened_pos
else:
anchor = 'NW'
row = 1
self._context['filter_winrow'] = row + height + opened_pos
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'cursor',
'row': row,
'col': 0,
'width': width,
'height': height,
'anchor': anchor,
})
elif self._context['filter_split_direction'] == 'floating':
self._titlestring = self._vim.options['titlestring']
self._filter_floating = True
elif split != 'no':
command = self._get_direction()
command += ' vsplit' if split == 'vertical' else ' split'
bufname = '[denite]-' + self._context['buffer_name']
if self._vim.call('exists', '*bufadd'):
bufnr = self._vim.call('bufadd', bufname)
vertical = 'vertical' if split == 'vertical' else ''
command = (
'buffer' if split
in ['no', 'tab', 'floating', 'floating_relative'] else 'sbuffer')
self._vim.command(
'silent keepalt %s %s %s %s' % (
self._get_direction(),
vertical,
command,
bufnr,
)
)
else:
self._vim.call(
'denite#util#execute_path',
f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
direction = str(self._context['direction'])
if direction == 'dynamictop' or direction == 'dynamicbottom':
self._update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self) -> typing.List[typing.Any]:
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
]
def _switch_prev_buffer(self) -> None:
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
def _init_syntax(self) -> None:
self._vim.command('syntax case ignore')
self._vim.command('highlight default link deniteInput ModeMsg')
self._vim.command('highlight link deniteMatchedRange ' +
self._context['highlight_matched_range'])
self._vim.command('highlight link deniteMatchedChar ' +
self._context['highlight_matched_char'])
self._vim.command('highlight default link ' +
'deniteStatusLinePath Comment')
self._vim.command('highlight default link ' +
'deniteStatusLineNumber LineNR')
self._vim.command('highlight default link ' +
'deniteSelectedLine Statement')
if self._floating:
self._vim.current.window.options['winhighlight'] = (
'Normal:' + self._context['highlight_window_background']
)
self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
' contains=deniteConcealedMark') % (
self._context['selected_icon']))
self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
' conceal contained') % (
self._context['selected_icon']))
if self._denite:
self._denite.init_syntax(self._context, self._is_multi)
def _update_candidates(self) -> bool:
if not self._denite:
return False
[self._is_async, pattern, statuses, self._entire_len,
self._candidates] = self._denite.filter_candidates(self._context)
prev_displayed_texts = self._displayed_texts
self._update_displayed_texts()
prev_matched_pattern = self._matched_pattern
self._matched_pattern = pattern
prev_statusline_sources = self._statusline_sources
self._statusline_sources = ' '.join(statuses)
if self._is_async:
self._start_timer('update_candidates')
else:
self._stop_timer('update_candidates')
updated = (self._displayed_texts != prev_displayed_texts or
self._matched_pattern != prev_matched_pattern or
self._statusline_sources != prev_statusline_sources)
if updated:
self._updated = True
self._start_timer('update_buffer')
if self._context['search'] and self._context['input']:
self._vim.call('setreg', '/', self._context['input'])
return self._updated
def _update_displayed_texts(self) -> None:
candidates_len = len(self._candidates)
if not self._is_async and self._context['auto_resize']:
winminheight = int(self._context['winminheight'])
max_height = min(int(self._context['winheight']),
self._get_max_height())
if (winminheight != -1 and candidates_len < winminheight):
self._winheight = winminheight
elif candidates_len > max_height:
self._winheight = max_height
elif candidates_len != self._winheight:
self._winheight = candidates_len
max_source_name_len = 0
if self._candidates:
max_source_name_len = max([
len(self._get_display_source_name(x['source_name']))
for x in self._candidates])
self._context['max_source_name_len'] = max_source_name_len
self._context['max_source_name_format'] = (
'{:<' + str(self._context['max_source_name_len']) + '}')
self._displayed_texts = [
self._get_candidate_display_text(i)
for i in range(0, candidates_len)
]
def _update_buffer(self) -> None:
is_current_buffer = self._bufnr == self._vim.current.buffer.number
self._update_status()
if self._check_matchdelete and self._context['match_highlight']:
matches = [x['id'] for x in
self._vim.call('getmatches', self._winid)]
if self._matched_range_id in matches:
self._vim.call('matchdelete',
self._matched_range_id, self._winid)
self._matched_range_id = -1
if self._matched_char_id in matches:
self._vim.call('matchdelete',
self._matched_char_id, self._winid)
self._matched_char_id = -1
if self._matched_pattern != '':
self._matched_range_id = self._vim.call(
'matchadd', 'deniteMatchedRange',
r'\c' + regex_convert_py_vim(self._matched_pattern),
10, -1, {'window': self._winid})
matched_char_pattern = '[{}]'.format(re.sub(
r'([\[\]\\^-])',
r'\\\1',
self._context['input'].replace(' ', '')
))
self._matched_char_id = self._vim.call(
'matchadd', 'deniteMatchedChar',
matched_char_pattern,
10, -1, {'window': self._winid})
prev_linenr = self._vim.call('line', '.')
prev_candidate = self._get_cursor_candidate()
buffer = self._vim.buffers[self._bufnr]
buffer.options['modifiable'] = True
self._vim.vars['denite#_candidates'] = [
x['word'] for x in self._candidates]
buffer[:] = self._displayed_texts
buffer.options['modifiable'] = False
self._previous_text = self._context['input']
self._resize_buffer(is_current_buffer)
is_changed = (self._context['reversed'] or
(is_current_buffer and
self._previous_text != self._context['input']))
if self._updated and is_changed:
if not is_current_buffer:
save_winid = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
self._init_cursor()
self._move_to_pos(self._cursor)
if not is_current_buffer:
self._vim.call('win_gotoid', save_winid)
elif is_current_buffer:
self._vim.call('cursor', [prev_linenr, 0])
if is_current_buffer:
if (self._context['auto_action'] and
prev_candidate != self._get_cursor_candidate()):
self.do_action(self._context['auto_action'])
self._updated = False
self._stop_timer('update_buffer')
def _update_status(self) -> None:
inpt = ''
if self._context['input']:
inpt = self._context['input'] + ' '
if self._context['error_messages']:
inpt = '[ERROR] ' + inpt
path = '[' + self._context['path'] + ']'
status = {
'input': inpt,
'sources': self._statusline_sources,
'path': path,
# Extra
'buffer_name': self._context['buffer_name'],
'line_total': len(self._candidates),
}
if status == self._prev_status:
return
self._bufvars['denite_statusline'] = status
self._prev_status = status
linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"
if self._context['statusline']:
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = (
"%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} " +
" %{denite#get_status('path')}%*" +
"%{" + linenr + "}%*")
else:
winnr = self._vim.call('win_id2win', self._winid)
self._vim.call('setwinvar', winnr, '&statusline', (
"%#deniteInput#%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} %=" +
"%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
"%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
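    # Examples of the abbreviation above (source_names == 'short'): a name
    # with a non-letter such as 'file/rec' shortens to 'f/r' via the regex;
    # an all-letter name such as 'buffer' falls back to its first two
    # characters, 'bu'.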
def _get_candidate_display_text(self, index: int) -> str:
source_names = self._context['source_names']
candidate = self._candidates[index]
terms = []
if self._is_multi and source_names != 'hide':
terms.append(self._context['max_source_name_format'].format(
self._get_display_source_name(candidate['source_name'])))
encoding = self._context['encoding']
abbr = candidate.get('abbr', candidate['word']).encode(
encoding, errors='replace').decode(encoding, errors='replace')
terms.append(abbr[:int(self._context['max_candidate_width'])])
return (self._context['selected_icon'] # type: ignore
if index in self._selected_candidates
else ' ') + ' '.join(terms).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
def _resize_buffer(self, is_current_buffer: bool) -> None:
split = self._context['split']
if (split == 'no' or split == 'tab' or
self._vim.call('winnr', '$') == 1):
return
winheight = max(self._winheight, 1)
winwidth = max(self._winwidth, 1)
is_vertical = split == 'vertical'
if not is_current_buffer:
restore = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
if not is_vertical and self._vim.current.window.height != winheight:
if self._floating:
wincol = int(self._context['winrow'])
row = wincol
if split == 'floating':
if self._context['auto_resize'] and row > 1:
row += int(self._context['winheight'])
row -= self._winheight
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'editor',
'row': row,
'col': int(self._context['wincol']),
'width': winwidth,
'height': winheight,
})
filter_row = 0 if wincol == 1 else row + winheight
filter_col = int(self._context['wincol'])
elif split == 'floating_relative':
init_pos = self._vim.call('nvim_win_get_config',
self._winid)
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'win',
'win': init_pos['win'],
'row': init_pos['row'],
'col': init_pos['col'],
'width': winwidth,
'height': winheight,
})
filter_col = init_pos['col']
if init_pos['anchor'] == 'NW':
winpos = self._vim.call('nvim_win_get_position',
self._winid)
filter_row = winpos[0] + winheight
filter_winid = self._vim.vars['denite#_filter_winid']
self._context['filter_winrow'] = row
if self._vim.call('win_id2win', filter_winid) > 0:
self._vim.call('nvim_win_set_config', filter_winid, {
'relative': 'editor',
'row': filter_row,
'col': filter_col,
})
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
if not is_current_buffer:
self._vim.call('win_gotoid', restore)
def _check_do_option(self) -> bool:
if self._context['do'] != '':
self._do_command(self._context['do'])
return True
elif (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self._do_immediately()
return True
return not (self._context['empty'] or
self._is_async or self._candidates)
def _check_move_option(self) -> None:
if self._context['cursor_pos'].isnumeric():
self._cursor = int(self._context['cursor_pos']) + 1
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self._move_to_last_line()
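    # ``cursor_pos`` formats parsed above: an absolute number such as '3'
    # (cursor set to line 4, since 1 is added), a relative '+N' or '-N'
    # offset, or '$' for the last candidate.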
def _do_immediately(self) -> None:
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
# Jump to denite window
self._init_buffer()
self.do_action('default')
candidate = self._get_cursor_candidate()
if not candidate:
return
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor, len(self._candidates),
candidate.get('abbr', candidate['word'])))
if goto:
# Move to the previous window
self._vim.command('wincmd p')
def _do_command(self, command: str) -> None:
self._init_cursor()
cursor = 1
while cursor < len(self._candidates):
self.do_action('default', command)
self._move_to_next_line()
self._quit_buffer()
def _cleanup(self) -> None:
self._stop_timer('update_candidates')
self._stop_timer('update_buffer')
if self._vim.current.buffer.number == self._bufnr:
self._cursor = self._vim.call('line', '.')
# Note: Close filter window before preview window
self._vim.call('denite#filter#_close_filter_window')
if not self._context['has_preview_window']:
self._vim.command('pclose!')
# Clear previewed buffers
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('highlight! link CursorLine CursorLine')
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
def _quit_buffer(self) -> None:
self._cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
# Denite buffer is already closed
return
winids = self._vim.call('win_findbuf',
self._vim.vars['denite#_filter_bufnr'])
if winids:
# Quit filter buffer
self._vim.call('win_gotoid', winids[0])
self._close_current_window()
# Move to denite window
self._vim.call('win_gotoid', self._winid)
# Restore the window
if self._context['split'] == 'no':
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._close_current_window()
self._vim.call('win_gotoid', self._prev_winid)
# Restore the position
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
# Note: execute restcmd twice to restore layout properly
self._vim.command(self._winrestcmd)
self._vim.command(self._winrestcmd)
clearmatch(self._vim)
def _get_cursor_candidate(self) -> Candidate:
return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def _init_denite(self) -> None:
if self._denite:
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = int(self._context['winheight'])
self._winwidth = int(self._context['winwidth'])
def _gather_candidates(self) -> None:
self._selected_candidates = []
if self._denite:
self._denite.gather_candidates(self._context)
def _init_cursor(self) -> None:
if self._context['reversed']:
self._move_to_last_line()
self._vim.command('normal! zb')
else:
self._move_to_first_line()
def _move_to_pos(self, pos: int) -> None:
self._vim.call('cursor', pos, 0)
self._cursor = pos
def _move_to_next_line(self) -> None:
if self._cursor < len(self._candidates):
self._cursor += 1
def _move_to_prev_line(self) -> None:
if self._cursor >= 1:
self._cursor -= 1
def _move_to_first_line(self) -> None:
self._cursor = 1
def _move_to_last_line(self) -> None:
self._cursor = len(self._candidates)
def _start_timer(self, key: str) -> None:
if key in self._timers:
return
if key == 'update_candidates':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_candidates_timer', self._bufnr)
elif key == 'update_buffer':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_buffer_timer', self._bufnr)
def _stop_timer(self, key: str) -> None:
if key not in self._timers:
return
self._vim.call('timer_stop', self._timers[key])
# Note: After timer_stop is called, self._timers may be removed
if key in self._timers:
self._timers.pop(key)
 | rplugin/python3/denite/ui/default.py | 35,078 | … | 1,100 | en | 0.643764
"""
"""
from __future__ import division
from torch.optim.optimizer import Optimizer, required
import numpy as np
import torch
from typing import NamedTuple, List
from dataclasses import dataclass
from enum import Enum
from typing import Union, Tuple
# from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar
class LayerType(Enum):
CONV = 1
FC = 2
NON_CONV = 3
@dataclass
class LayerMetrics:
rank: float
KG: float
condition: float
@dataclass
class ConvLayerMetrics:
input_channel: LayerMetrics
output_channel: LayerMetrics
class LRMetrics(NamedTuple):
rank_velocity: List[float]
r_conv: List[float]
def EVBMF(Y, sigma2=None, H=None):
"""Implementation of the analytical solution to Empirical Variational
Bayes Matrix Factorization.
This function can be used to calculate the analytical solution to
empirical VBMF.
This is based on the paper and MatLab code by Nakajima et al.:
"Global analytic solution of fully-observed variational Bayesian matrix
factorization."
Notes
-----
If sigma2 is unspecified, it is estimated by minimizing the free
energy.
If H is unspecified, it is set to the smallest of the sides of the
input Y.
Attributes
----------
Y : numpy-array
Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
sigma2 : int or None (default=None)
Variance of the noise on Y.
H : int or None (default = None)
Maximum rank of the factorized matrices.
Returns
-------
U : numpy-array
Left-singular vectors.
S : numpy-array
Diagonal matrix of singular values.
V : numpy-array
Right-singular vectors.
post : dictionary
Dictionary containing the computed posterior values.
References
----------
.. [1] Nakajima, Shinichi, et al. "Global analytic solution of
fully-observed variational Bayesian matrix factorization." Journal of
Machine Learning Research 14.Jan (2013): 1-37.
.. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by
variational Bayesian PCA." Advances in Neural Information Processing
Systems. 2012.
"""
L, M = Y.shape # has to be L<=M
if H is None:
H = L
alpha = L / M
tauubar = 2.5129 * np.sqrt(alpha)
# SVD of the input matrix, max rank of H
# U, s, V = np.linalg.svd(Y)
U, s, V = torch.svd(Y)
U = U[:, :H]
s = s[:H]
V = V[:H].T
# Calculate residual
residual = 0.
if H < L:
# residual = np.sum(np.sum(Y**2)-np.sum(s**2))
residual = torch.sum(np.sum(Y**2) - np.sum(s**2))
# Estimation of the variance when sigma2 is unspecified
if sigma2 is None:
xubar = (1 + tauubar) * (1 + alpha / tauubar)
eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1
# upper_bound = (np.sum(s**2)+residual)/(L*M)
# lower_bound = np.max(
# [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])
upper_bound = (torch.sum(s**2) + residual) / (L * M)
lower_bound = torch.max(torch.stack(
[s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0))
scale = 1. # /lower_bound
s = s * np.sqrt(scale)
residual = residual * scale
lower_bound = lower_bound * scale
upper_bound = upper_bound * scale
sigma2_opt = minimize_scalar(
EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar),
bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],
method='Bounded')
sigma2 = sigma2_opt.x
# Threshold gamma term
threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))
# pos = np.sum(s > threshold)
pos = torch.sum(s > threshold)
# Formula (15) from [2]
# d = torch.multiply(s[:pos]/2,
# 1-torch.divide(
# torch.tensor((L+M)*sigma2, device=s.device),
# s[:pos]**2) + torch.sqrt((1-torch.divide(
# torch.tensor(
# (L+M)*sigma2, device=s.device),
# s[:pos]**2))**2 -
# 4*L*M*sigma2**2/s[:pos]**4))
# d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt(
# (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4))
d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2
+ torch.sqrt((1 -
(L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4))
# Computation of the posterior
# post = {}
# post['ma'] = np.zeros(H)
# post['mb'] = np.zeros(H)
# post['sa2'] = np.zeros(H)
# post['sb2'] = np.zeros(H)
# post['cacb'] = np.zeros(H)
# tau = np.multiply(d, s[:pos])/(M*sigma2)
# delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)
# post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
# post['mb'][:pos] = np.sqrt(np.divide(d, delta))
# post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
# post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
# post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))
# post['sigma2'] = sigma2
# post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) +
# (residual+np.sum(s**2))/sigma2 + np.sum(
# M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau))
return U[:, :pos], torch.diag(d), V[:, :pos] # , post
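# Example (a minimal, illustrative sketch): estimating the rank of a noisy
# low-rank matrix. The recovered rank depends on the noise level, so treat
# the printed value as indicative rather than guaranteed.
#
#     L, M, true_rank = 50, 80, 5
#     Y = torch.randn(L, true_rank) @ torch.randn(true_rank, M)
#     Y = Y + 0.01 * torch.randn(L, M)  # keep L <= M, as EVBMF expects
#     U, S, V = EVBMF(Y)
#     print(S.shape[0])  # estimated rank, typically close to 5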
def EVBsigma2(sigma2, L, M, s, residual, xubar):
H = len(s)
alpha = L / M
x = s**2 / (M * sigma2)
z1 = x[x > xubar]
z2 = x[x <= xubar]
tau_z1 = tau(z1, alpha)
term1 = np.sum(z2 - np.log(z2))
term2 = np.sum(z1 - tau_z1)
term3 = np.sum(np.log(np.divide(tau_z1 + 1, z1)))
term4 = alpha * np.sum(np.log(tau_z1 / alpha + 1))
obj = term1 + term2 + term3 + term4 + residual / (M * sigma2) + (L - H) * np.log(sigma2)
return obj
def phi0(x):
return x - np.log(x)
def phi1(x, alpha):
return np.log(tau(x, alpha) + 1) + alpha * np.log(tau(x, alpha) / alpha + 1
) - tau(x, alpha)
def tau(x, alpha):
return 0.5 * (x - (1 + alpha) + np.sqrt((x - (1 + alpha))**2 - 4 * alpha))
class Metrics:
def __init__(self, params, linear: bool = False) -> None:
'''
parameters: list of torch.nn.Module.parameters()
'''
self.params = params
self.history = list()
mask = list()
for param_idx, param in enumerate(params):
param_shape = param.shape
if not linear:
if len(param_shape) != 4:
mask.append(param_idx)
else:
if len(param_shape) != 4 and len(param_shape) != 2:
mask.append(param_idx)
self.mask = set(mask)
def compute_low_rank(self,
tensor: torch.Tensor,
normalizer: float) -> torch.Tensor:
if tensor.requires_grad:
tensor = tensor.detach()
try:
tensor_size = tensor.shape
if tensor_size[0] > tensor_size[1]:
tensor = tensor.T
U_approx, S_approx, V_approx = EVBMF(tensor)
except RuntimeError:
return None, None, None
rank = S_approx.shape[0] / tensor_size[0] # normalizer
low_rank_eigen = torch.diag(S_approx).data.cpu().numpy()
if len(low_rank_eigen) != 0:
condition = low_rank_eigen[0] / low_rank_eigen[-1]
sum_low_rank_eigen = low_rank_eigen / \
max(low_rank_eigen)
sum_low_rank_eigen = np.sum(sum_low_rank_eigen)
else:
condition = 0
sum_low_rank_eigen = 0
KG = sum_low_rank_eigen / tensor_size[0] # normalizer
return rank, KG, condition
def KG(self, epoch: int) -> np.ndarray:
KG_list = list()
for i, (index, metric) in enumerate(self.history[epoch]):
if isinstance(metric, ConvLayerMetrics):
KG_list.append((metric.input_channel.KG
+ metric.output_channel.KG) / 2)
elif isinstance(metric, LayerMetrics):
KG_list.append(metric.KG)
return np.array(KG_list)
def __call__(self) -> List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]]:
'''
Computes the knowledge gain (S) and mapping condition (condition)
'''
metrics: List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]] = list()
for layer_index, layer in enumerate(self.params):
if layer_index in self.mask:
metrics.append((layer_index, None))
continue
# if np.less(np.prod(layer.shape), 10_000):
# metrics.append((layer_index, None))
if len(layer.shape) == 4:
layer_tensor = layer.data
tensor_size = layer_tensor.shape
mode_3_unfold = layer_tensor.permute(1, 0, 2, 3)
mode_3_unfold = torch.reshape(
mode_3_unfold, [tensor_size[1], tensor_size[0]
* tensor_size[2] * tensor_size[3]])
mode_4_unfold = layer_tensor
mode_4_unfold = torch.reshape(
mode_4_unfold, [tensor_size[0], tensor_size[1]
* tensor_size[2] * tensor_size[3]])
in_rank, in_KG, in_condition = self.compute_low_rank(
mode_3_unfold, tensor_size[1])
if in_rank is None and in_KG is None and in_condition is None:
if len(self.history) > 0:
in_rank = self.history[-1][
layer_index][1].input_channel.rank
in_KG = self.history[-1][
layer_index][1].input_channel.KG
in_condition = self.history[-1][
layer_index][1].input_channel.condition
else:
in_rank = in_KG = in_condition = 0.
out_rank, out_KG, out_condition = self.compute_low_rank(
mode_4_unfold, tensor_size[0])
if out_rank is None and out_KG is None and out_condition is None:
if len(self.history) > 0:
out_rank = self.history[-1][
layer_index][1].output_channel.rank
out_KG = self.history[-1][
layer_index][1].output_channel.KG
out_condition = self.history[-1][
layer_index][1].output_channel.condition
else:
out_rank = out_KG = out_condition = 0.
metrics.append((layer_index, ConvLayerMetrics(
input_channel=LayerMetrics(
rank=in_rank,
KG=in_KG,
condition=in_condition),
output_channel=LayerMetrics(
rank=out_rank,
KG=out_KG,
condition=out_condition))))
elif len(layer.shape) == 2:
rank, KG, condition = self.compute_low_rank(
layer, layer.shape[0])
if rank is None and KG is None and condition is None:
if len(self.history) > 0:
rank = self.history[-1][layer_index][1].rank
KG = self.history[-1][layer_index][1].KG
condition = self.history[-1][layer_index][1].condition
else:
rank = KG = condition = 0.
metrics.append((layer_index, LayerMetrics(
rank=rank,
KG=KG,
condition=condition)))
else:
metrics.append((layer_index, None))
self.history.append(metrics)
return metrics
class Adas(Optimizer):
"""
    Adas: vectorized SGD (derived from ``torch.optim.SGD``) whose per-layer
    learning rates are adapted each epoch from knowledge-gain metrics.
"""
def __init__(self,
params,
lr: float = required,
beta: float = 0.8,
step_size: int = None,
linear: bool = True,
gamma: float = 1,
momentum: float = 0,
dampening: float = 0,
weight_decay: float = 0,
nesterov: bool = False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening")
super(Adas, self).__init__(params[:2], defaults)
# Adas Specific stuff (not SGD)
if np.less(beta, 0) or np.greater_equal(beta, 1):
raise ValueError(f'Invalid beta: {beta}')
if np.less(gamma, 0):
raise ValueError(f'Invalid gamma: {gamma}')
if step_size is not None:
if np.less_equal(step_size, 0):
raise ValueError(f'Invalid step_size: {step_size}')
self.step_size = step_size
self.gamma = gamma
self.beta = beta
self.metrics = metrics = Metrics(params=params[2]["all_params"], linear=linear)
self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))
self.velocity = np.zeros(
len(self.metrics.params) - len(self.metrics.mask))
self.not_ready = list(range(len(self.velocity)))
self.init_lr = lr
self.zeta = 1.
self.KG = 0.
def __setstate__(self, state):
super(Adas, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def epoch_step(self, epoch: int) -> None:
self.metrics()
if epoch == 0:
velocity = self.init_lr * np.ones(len(self.velocity))
self.KG = self.metrics.KG(epoch)
else:
KG = self.metrics.KG(epoch)
velocity = KG - self.KG
self.KG = KG
            for idx in list(self.not_ready):  # iterate a copy: items are removed below
if np.isclose(KG[idx], 0.):
velocity[idx] = self.init_lr - \
self.beta * self.velocity[idx]
else:
self.not_ready.remove(idx)
if self.step_size is not None:
if epoch % self.step_size == 0 and epoch > 0:
self.lr_vector *= self.gamma
self.zeta *= self.gamma
self.velocity = np.maximum(
self.beta * self.velocity + self.zeta * velocity, 0.)
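        # Annotation: each unmasked layer's rate follows
        #   lr_l <- max(beta * lr_l + zeta * (KG_l(t) - KG_l(t-1)), 0),
        # so layers whose knowledge gain stalls decay toward a zero rate.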
count = 0
for i in range(len(self.metrics.params)):
if i in self.metrics.mask:
self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)]
else:
self.lr_vector[i] = self.velocity[count]
count += 1
def step(self, closure: callable = None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
iteration_group = 0
for group in self.param_groups:
iteration_group += 1
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p_index, p in enumerate(group['params']):
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(p.data, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(
d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
# p.data.add_(-group['lr'], d_p)
p.data.add_(d_p, alpha=-self.lr_vector[p_index])
return loss
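# --- Hedged usage sketch (not part of the original file) --------------------
# How the pieces above fit together, assuming Adas receives two SGD-style
# param groups plus a third dict carrying "all_params" for the Metrics
# tracker (see __init__). The tiny model and random batch are illustrative.
if __name__ == '__main__':
    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Flatten(),
                          nn.Linear(8 * 30 * 30, 10))
    all_params = list(model.parameters())
    optimizer = Adas([{'params': all_params}, {'params': []},
                      {'all_params': all_params}], lr=0.03, beta=0.8)
    criterion = nn.CrossEntropyLoss()
    for epoch in range(2):
        x, y = torch.randn(4, 3, 32, 32), torch.randint(0, 10, (4,))
        optimizer.zero_grad()
        criterion(model(x), y).backward()
        optimizer.step()
        optimizer.epoch_step(epoch)  # recompute KG metrics, refresh lr_vector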
| src/transformers/adas.py | 17,213 | ... | 3,534 | en | 0.510946
"""Unit tests for tftpy."""
import unittest
import logging
import tftpy
import os
import time
import threading
from errno import EINTR
from multiprocessing import Queue
log = tftpy.log
class TestTftpyClasses(unittest.TestCase):
def setUp(self):
tftpy.setLogLevel(logging.DEBUG)
def testTftpPacketRRQ(self):
log.debug("===> Running testcase testTftpPacketRRQ")
options = {}
rrq = tftpy.TftpPacketRRQ()
rrq.filename = 'myfilename'
rrq.mode = 'octet'
rrq.options = options
rrq.encode()
        self.assertIsNotNone(rrq.buffer, "Buffer populated")
rrq.decode()
self.assertEqual(rrq.filename, b"myfilename", "Filename correct")
self.assertEqual(rrq.mode, b"octet", "Mode correct")
self.assertEqual(rrq.options, options, "Options correct")
# repeat test with options
rrq.options = { 'blksize': '1024' }
rrq.filename = 'myfilename'
rrq.mode = 'octet'
rrq.encode()
        self.assertIsNotNone(rrq.buffer, "Buffer populated")
rrq.decode()
self.assertEqual(rrq.filename, b"myfilename", "Filename correct")
self.assertEqual(rrq.mode, b"octet", "Mode correct")
self.assertEqual(rrq.options['blksize'], '1024', "Blksize correct")
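        # For reference (per RFC 1350, with options per RFC 2347), the bytes
        # round-tripped above are laid out as:
        #   2-byte opcode (1) | filename | NUL | mode | NUL [| opt | NUL | val | NUL]...
        # e.g. b'\x00\x01myfilename\x00octet\x00' for the optionless request.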
def testTftpPacketWRQ(self):
log.debug("===> Running test case testTftpPacketWRQ")
options = {}
wrq = tftpy.TftpPacketWRQ()
wrq.filename = 'myfilename'
wrq.mode = 'octet'
wrq.options = options
wrq.encode()
        self.assertIsNotNone(wrq.buffer, "Buffer populated")
wrq.decode()
self.assertEqual(wrq.opcode, 2, "Opcode correct")
self.assertEqual(wrq.filename, b"myfilename", "Filename correct")
self.assertEqual(wrq.mode, b"octet", "Mode correct")
self.assertEqual(wrq.options, options, "Options correct")
# repeat test with options
wrq.options = { 'blksize': '1024' }
wrq.filename = 'myfilename'
wrq.mode = 'octet'
wrq.encode()
        self.assertIsNotNone(wrq.buffer, "Buffer populated")
wrq.decode()
self.assertEqual(wrq.opcode, 2, "Opcode correct")
self.assertEqual(wrq.filename, b"myfilename", "Filename correct")
self.assertEqual(wrq.mode, b"octet", "Mode correct")
self.assertEqual(wrq.options['blksize'], '1024', "Blksize correct")
def testTftpPacketDAT(self):
log.debug("===> Running testcase testTftpPacketDAT")
dat = tftpy.TftpPacketDAT()
dat.blocknumber = 5
data = "this is some data"
dat.data = data
dat.encode()
        self.assertIsNotNone(dat.buffer, "Buffer populated")
dat.decode()
self.assertEqual(dat.opcode, 3, "DAT opcode is correct")
self.assertEqual(dat.blocknumber, 5, "Block number is correct")
self.assertEqual(dat.data, data, "DAT data is correct")
def testTftpPacketACK(self):
log.debug("===> Running testcase testTftpPacketACK")
ack = tftpy.TftpPacketACK()
ack.blocknumber = 6
ack.encode()
        self.assertIsNotNone(ack.buffer, "Buffer populated")
ack.decode()
self.assertEqual(ack.opcode, 4, "ACK opcode is correct")
self.assertEqual(ack.blocknumber, 6, "ACK blocknumber correct")
def testTftpPacketERR(self):
log.debug("===> Running testcase testTftpPacketERR")
err = tftpy.TftpPacketERR()
err.errorcode = 4
err.encode()
        self.assertIsNotNone(err.buffer, "Buffer populated")
err.decode()
self.assertEqual(err.opcode, 5, "ERR opcode is correct")
self.assertEqual(err.errorcode, 4, "ERR errorcode is correct")
def testTftpPacketOACK(self):
log.debug("===> Running testcase testTftpPacketOACK")
oack = tftpy.TftpPacketOACK()
# Test that if we make blksize a number, it comes back a string.
oack.options = { 'blksize': 2048 }
oack.encode()
        self.assertIsNotNone(oack.buffer, "Buffer populated")
oack.decode()
self.assertEqual(oack.opcode, 6, "OACK opcode is correct")
self.assertEqual(oack.options['blksize'],
'2048',
"OACK blksize option is correct")
# Test string to string
oack.options = { 'blksize': '4096' }
oack.encode()
        self.assertIsNotNone(oack.buffer, "Buffer populated")
oack.decode()
self.assertEqual(oack.opcode, 6, "OACK opcode is correct")
self.assertEqual(oack.options['blksize'],
'4096',
"OACK blksize option is correct")
def testTftpPacketFactory(self):
log.debug("===> Running testcase testTftpPacketFactory")
# Make sure that the correct class is created for the correct opcode.
classes = {
1: tftpy.TftpPacketRRQ,
2: tftpy.TftpPacketWRQ,
3: tftpy.TftpPacketDAT,
4: tftpy.TftpPacketACK,
5: tftpy.TftpPacketERR,
6: tftpy.TftpPacketOACK
}
factory = tftpy.TftpPacketFactory()
for opcode in classes:
            self.assertIsInstance(factory._TftpPacketFactory__create(opcode),
                                  classes[opcode],
                                  "opcode %d returns the correct class" % opcode)
class TestTftpyState(unittest.TestCase):
def setUp(self):
tftpy.setLogLevel(logging.DEBUG)
def clientServerUploadOptions(self,
options,
input=None,
transmitname=None,
server_kwargs=None):
"""Fire up a client and a server and do an upload."""
root = '/tmp'
home = os.path.dirname(os.path.abspath(__file__))
filename = '640KBFILE'
input_path = os.path.join(home, filename)
if not input:
input = input_path
if transmitname:
filename = transmitname
server_kwargs = server_kwargs or {}
server = tftpy.TftpServer(root, **server_kwargs)
client = tftpy.TftpClient('localhost',
20001,
options)
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
# parent - let the server start
try:
time.sleep(1)
client.upload(filename,
input)
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
else:
server.listen('localhost', 20001)
def clientServerDownloadOptions(self, options, output='/tmp/out'):
"""Fire up a client and a server and do a download."""
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
client = tftpy.TftpClient('localhost',
20001,
options)
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
# parent - let the server start
try:
time.sleep(1)
client.download('640KBFILE',
output)
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
else:
server.listen('localhost', 20001)
def testClientServerNoOptions(self):
self.clientServerDownloadOptions({})
def testClientServerTsizeOptions(self):
self.clientServerDownloadOptions({'tsize': 64*1024})
def testClientFileObject(self):
        output = open('/tmp/out', 'wb')
self.clientServerDownloadOptions({}, output)
def testClientServerBlksize(self):
for blksize in [512, 1024, 2048, 4096]:
self.clientServerDownloadOptions({'blksize': blksize})
def testClientServerUploadNoOptions(self):
self.clientServerUploadOptions({})
def testClientServerUploadFileObj(self):
        fileobj = open('t/640KBFILE', 'rb')
self.clientServerUploadOptions({}, input=fileobj)
def testClientServerUploadWithSubdirs(self):
self.clientServerUploadOptions({}, transmitname='foo/bar/640KBFILE')
def testClientServerUploadStartingSlash(self):
self.clientServerUploadOptions({}, transmitname='/foo/bar/640KBFILE')
def testClientServerUploadOptions(self):
for blksize in [512, 1024, 2048, 4096]:
self.clientServerUploadOptions({'blksize': blksize})
def customUploadHelper(self, return_func):
q = Queue()
def upload_open(path, context):
q.put('called')
return return_func(path)
self.clientServerUploadOptions(
{},
server_kwargs={'upload_open': upload_open})
self.assertEqual(q.get(True, 1), 'called')
def testClientServerUploadCustomOpen(self):
self.customUploadHelper(lambda p: open(p, 'wb'))
def testClientServerUploadCustomOpenForbids(self):
        with self.assertRaisesRegex(tftpy.TftpException, 'Access violation'):
self.customUploadHelper(lambda p: None)
def testClientServerUploadTsize(self):
self.clientServerUploadOptions({'tsize': 64*1024}, transmitname='/foo/bar/640KBFILE')
def testClientServerNoOptionsDelay(self):
tftpy.TftpStates.DELAY_BLOCK = 10
self.clientServerDownloadOptions({})
tftpy.TftpStates.DELAY_BLOCK = 0
def testServerNoOptions(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
# Testing without the dyn_func_file set.
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
self.assertTrue( isinstance(serverstate,
tftpy.TftpContextServer) )
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '640KBFILE'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
serverstate.start(rrq.encode().buffer)
# At a 512 byte blocksize, this should be 1280 packets exactly.
for block in range(1, 1281):
# Should be in expectack state.
self.assertTrue( isinstance(serverstate.state,
tftpy.TftpStateExpectACK) )
ack = tftpy.TftpPacketACK()
ack.blocknumber = block % 65536
serverstate.state = serverstate.state.handle(ack, raddress, rport)
# The last DAT packet should be empty, indicating a completed
# transfer.
ack = tftpy.TftpPacketACK()
ack.blocknumber = 1281 % 65536
finalstate = serverstate.state.handle(ack, raddress, rport)
self.assertTrue( finalstate is None )
def testServerNoOptionsSubdir(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
# Testing without the dyn_func_file set.
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
self.assertTrue( isinstance(serverstate,
tftpy.TftpContextServer) )
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '640KBFILE'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
serverstate.start(rrq.encode().buffer)
# At a 512 byte blocksize, this should be 1280 packets exactly.
for block in range(1, 1281):
# Should be in expectack state, or None
self.assertTrue( isinstance(serverstate.state,
tftpy.TftpStateExpectACK) )
ack = tftpy.TftpPacketACK()
ack.blocknumber = block % 65536
serverstate.state = serverstate.state.handle(ack, raddress, rport)
# The last DAT packet should be empty, indicating a completed
# transfer.
ack = tftpy.TftpPacketACK()
ack.blocknumber = 1281 % 65536
finalstate = serverstate.state.handle(ack, raddress, rport)
self.assertTrue( finalstate is None )
def testServerInsecurePath(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '../setup.py'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
self.assertRaises(tftpy.TftpException,
serverstate.start, rrq.encode().buffer)
def testServerSecurePath(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '640KBFILE'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
serverstate.start(rrq.encode().buffer)
# Should be in expectack state.
self.assertTrue(isinstance(serverstate.state,
tftpy.TftpStateExpectACK))
def testServerDownloadWithStopNow(self, output='/tmp/out'):
log.debug("===> Running testcase testServerDownloadWithStopNow")
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
client = tftpy.TftpClient('localhost',
20001,
{})
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
try:
# parent - let the server start
stopped_early = False
time.sleep(1)
def delay_hook(pkt):
time.sleep(0.005) # 5ms
client.download('640KBFILE', output, delay_hook)
except:
log.warn("client threw exception as expected")
stopped_early = True
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
                self.assertTrue(stopped_early,
                                "Client should have been interrupted by the immediate server stop")
else:
import signal
def handlealarm(signum, frame):
server.stop(now=True)
signal.signal(signal.SIGALRM, handlealarm)
signal.alarm(2)
try:
server.listen('localhost', 20001)
log.error("server didn't throw exception")
except Exception as err:
log.error("server got unexpected exception %s" % err)
# Wait until parent kills us
while True:
time.sleep(1)
def testServerDownloadWithStopNotNow(self, output='/tmp/out'):
log.debug("===> Running testcase testServerDownloadWithStopNotNow")
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
client = tftpy.TftpClient('localhost',
20001,
{})
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
try:
stopped_early = True
# parent - let the server start
time.sleep(1)
def delay_hook(pkt):
time.sleep(0.005) # 5ms
client.download('640KBFILE', output, delay_hook)
stopped_early = False
except:
log.warn("client threw exception as expected")
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
                self.assertFalse(stopped_early,
                                 "Server should not exit early")
else:
import signal
def handlealarm(signum, frame):
server.stop(now=False)
signal.signal(signal.SIGALRM, handlealarm)
signal.alarm(2)
try:
server.listen('localhost', 20001)
except Exception as err:
log.error("server threw exception %s" % err)
# Wait until parent kills us
while True:
time.sleep(1)
def testServerDownloadWithDynamicPort(self, output='/tmp/out'):
log.debug("===> Running testcase testServerDownloadWithDynamicPort")
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
server_thread = threading.Thread(target=server.listen,
kwargs={'listenip': 'localhost',
'listenport': 0})
server_thread.start()
try:
server.is_running.wait()
client = tftpy.TftpClient('localhost', server.listenport, {})
time.sleep(1)
client.download('640KBFILE',
output)
finally:
server.stop(now=False)
server_thread.join()
class TestTftpyLoggers(unittest.TestCase):
def setUp(self):
tftpy.setLogLevel(logging.DEBUG)
def testStreamLogger(self):
# Not sure how best to test this. Maybe configure the loggers and look
# for any complaints.
        try:
            tftpy.addHandler(tftpy.create_streamhandler())
        except Exception:
            self.fail("create_streamhandler raised unexpectedly")
def testFileLogger(self):
# Same as previous.
        try:
            tftpy.addHandler(tftpy.create_rotatingfilehandler('/tmp/log'))
        except Exception:
            self.fail("create_rotatingfilehandler raised unexpectedly")
if __name__ == '__main__':
unittest.main()
| t/test.py | 18,676 | ... | 1,332 | en | 0.848055
r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a validation metric and stop training when it stops improving.
"""
from copy import deepcopy
import numpy as np
import torch
import torch.distributed as dist
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
torch_inf = torch.tensor(np.inf)
try:
import torch_xla
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class EarlyStopping(Callback):
r"""
Args:
monitor: quantity to be monitored. Default: ``'val_loss'``.
.. note:: Has no effect when using `EvalResult` or `TrainResult`
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than `min_delta`, will count as no
improvement. Default: ``0.0``.
patience: number of validation epochs with no improvement
after which training will be stopped. Default: ``3``.
verbose: verbosity mode. Default: ``False``.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity. Default: ``'auto'``.
strict: whether to crash the training if `monitor` is
not found in the validation metrics. Default: ``True``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import EarlyStopping
>>> early_stopping = EarlyStopping('val_loss')
>>> trainer = Trainer(early_stop_callback=early_stopping)
"""
mode_dict = {
'min': torch.lt,
'max': torch.gt,
}
def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,
verbose: bool = False, mode: str = 'auto', strict: bool = True):
super().__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.strict = strict
self.min_delta = min_delta
self.wait_count = 0
self.stopped_epoch = 0
self.mode = mode
if mode not in self.mode_dict:
if self.verbose > 0:
log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
if self.mode == 'auto':
if self.monitor == 'acc':
self.mode = 'max'
else:
self.mode = 'min'
if self.verbose > 0:
log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
self.min_delta *= 1 if self.monitor_op == torch.gt else -1
self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
def _validate_condition_metric(self, logs):
monitor_val = logs.get(self.monitor)
        error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
                     f' which is not available. Either add `{self.monitor}` to the return of'
                     f' validation_epoch_end or modify your EarlyStopping callback to use any of the'
                     f' following: `{"`, `".join(list(logs.keys()))}`')
if monitor_val is None:
if self.strict:
raise RuntimeError(error_msg)
if self.verbose > 0:
rank_zero_warn(error_msg, RuntimeWarning)
return False
return True
@property
def monitor_op(self):
return self.mode_dict[self.mode]
def state_dict(self):
return {
'wait_count': self.wait_count,
'stopped_epoch': self.stopped_epoch,
'best_score': self.best_score,
'patience': self.patience
}
def load_state_dict(self, state_dict):
state_dict = deepcopy(state_dict)
self.wait_count = state_dict['wait_count']
self.stopped_epoch = state_dict['stopped_epoch']
self.best_score = state_dict['best_score']
self.patience = state_dict['patience']
def on_validation_end(self, trainer, pl_module):
self._run_early_stopping_check(trainer, pl_module)
def on_validation_epoch_end(self, trainer, pl_module):
val_es_key = 'val_early_stop_on'
if trainer.callback_metrics.get(val_es_key) is not None:
self.monitor = val_es_key
# disable strict checking when using structured results
if val_es_key in trainer.callback_metrics:
self.strict = False
self._validate_condition_metric(trainer.callback_metrics)
def on_train_epoch_end(self, trainer, pl_module):
# disable early stopping in train loop when there's a val loop
if self.monitor == 'val_early_stop_on':
return
# early stopping can also work in the train loop when there is no val loop and when using structured results
should_check_early_stop = False
train_es_key = 'early_stop_on'
if trainer.callback_metrics.get(train_es_key, None) is not None:
self.monitor = train_es_key
should_check_early_stop = True
if should_check_early_stop:
self._run_early_stopping_check(trainer, pl_module)
def _run_early_stopping_check(self, trainer, pl_module):
logs = trainer.callback_metrics
if not self._validate_condition_metric(logs):
return # short circuit if metric not present
current = logs.get(self.monitor)
# when in dev debugging
trainer.dev_debugger.track_early_stopping_history(current)
if not isinstance(current, torch.Tensor):
current = torch.tensor(current, device=pl_module.device)
if trainer.use_tpu and XLA_AVAILABLE:
current = current.cpu()
if self.monitor_op(current - self.min_delta, self.best_score):
self.best_score = current
self.wait_count = 0
else:
self.wait_count += 1
should_stop = self.wait_count >= self.patience
if bool(should_stop):
self.stopped_epoch = trainer.current_epoch
trainer.should_stop = True
# stop every ddp process if any world process decides to stop
self._stop_distributed_training(trainer, pl_module)
def _stop_distributed_training(self, trainer, pl_module):
# in ddp make sure all processes stop when one is flagged
if trainer.use_ddp or trainer.use_ddp2:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)
            dist.all_reduce(stop, op=dist.ReduceOp.SUM)
dist.barrier()
trainer.should_stop = stop == trainer.world_size
if trainer.use_tpu:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)
            stop = xm.mesh_reduce("stop_signal", stop, sum)  # sum over cores; 0-d tensors cannot be torch.cat'ed
torch_xla.core.xla_model.rendezvous("pl.EarlyStoppingCallback.stop_distributed_training_check")
trainer.should_stop = int(stop.item()) == trainer.world_size
def on_train_end(self, trainer, pl_module):
if self.stopped_epoch > 0 and self.verbose > 0:
rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
' but will start from "0" in v0.8.0.', DeprecationWarning)
log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
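# --- Hedged worked example (annotation, not part of the callback) -----------
# How `min_delta` interacts with `monitor_op` in `_run_early_stopping_check`:
# in 'min' mode __init__ flips min_delta negative, so the improvement test
# torch.lt(current - min_delta, best) means the new loss must undercut the
# best score by more than |min_delta| to reset the patience counter.
if __name__ == '__main__':
    es = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=2)
    best, current = torch.tensor(0.500), torch.tensor(0.495)
    improved = es.monitor_op(current - es.min_delta, best)
    print(bool(improved))  # False: 0.495 is within min_delta of 0.500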
| pytorch_lightning/callbacks/early_stopping.py | 7,788 | ... | 1,744 | en | 0.80543
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import ad_group
from google.ads.googleads.v4.services.types import ad_group_service
from .base import AdGroupServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupServiceGrpcTransport(AdGroupServiceTransport):
"""gRPC backend transport for AdGroupService.
Service to manage ad groups.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group(
self,
) -> Callable[[ad_group_service.GetAdGroupRequest], ad_group.AdGroup]:
r"""Return a callable for the get ad group method over gRPC.
Returns the requested ad group in full detail.
Returns:
Callable[[~.GetAdGroupRequest],
~.AdGroup]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_ad_group" not in self._stubs:
self._stubs["get_ad_group"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/GetAdGroup",
request_serializer=ad_group_service.GetAdGroupRequest.serialize,
response_deserializer=ad_group.AdGroup.deserialize,
)
return self._stubs["get_ad_group"]
@property
def mutate_ad_groups(
self,
) -> Callable[
[ad_group_service.MutateAdGroupsRequest],
ad_group_service.MutateAdGroupsResponse,
]:
r"""Return a callable for the mutate ad groups method over gRPC.
Creates, updates, or removes ad groups. Operation
statuses are returned.
Returns:
Callable[[~.MutateAdGroupsRequest],
~.MutateAdGroupsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_ad_groups" not in self._stubs:
self._stubs["mutate_ad_groups"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/MutateAdGroups",
request_serializer=ad_group_service.MutateAdGroupsRequest.serialize,
response_deserializer=ad_group_service.MutateAdGroupsResponse.deserialize,
)
return self._stubs["mutate_ad_groups"]
__all__ = ("AdGroupServiceGrpcTransport",)
| google/ads/googleads/v4/services/services/ad_group_service/transports/grpc.py | 11,198 | ... | 5,160 | en | 0.81415
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/72_callback.neptune.ipynb (unless otherwise specified).
__all__ = ['NeptuneCallback']
# Cell
import tempfile
from ..basics import *
from ..learner import Callback
# Cell
import neptune
# Cell
class NeptuneCallback(Callback):
"Log losses, metrics, model weights, model architecture summary to neptune"
order = Recorder.order+1
def __init__(self, log_model_weights=True, keep_experiment_running=False):
self.log_model_weights = log_model_weights
self.keep_experiment_running = keep_experiment_running
self.experiment = None
if neptune.project is None:
            raise ValueError(
                'You did not initialize project in neptune.\n'
                'Please invoke `neptune.init("USERNAME/PROJECT_NAME")` before this callback.')
def before_fit(self):
try:
self.experiment = neptune.get_experiment()
except ValueError:
print('No active experiment. Please invoke `neptune.create_experiment()` before this callback.')
try:
self.experiment.set_property('n_epoch', str(self.learn.n_epoch))
self.experiment.set_property('model_class', str(type(self.learn.model)))
except: print(f'Did not log all properties. Check properties in the {neptune.get_experiment()}.')
try:
with tempfile.NamedTemporaryFile(mode='w') as f:
with open(f.name, 'w') as g: g.write(repr(self.learn.model))
self.experiment.log_artifact(f.name, 'model_summary.txt')
except: print('Did not log model summary. Check if your model is PyTorch model.')
if self.log_model_weights and not hasattr(self.learn, 'save_model'):
print('Unable to log model to Neptune.\n',
'Use "SaveModelCallback" to save model checkpoints that will be logged to Neptune.')
def after_batch(self):
# log loss and opt.hypers
if self.learn.training:
self.experiment.log_metric('batch__smooth_loss', self.learn.smooth_loss)
self.experiment.log_metric('batch__loss', self.learn.loss)
self.experiment.log_metric('batch__train_iter', self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items(): self.experiment.log_metric(f'batch__opt.hypers.{k}', v)
def after_epoch(self):
# log metrics
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ['epoch', 'time']: self.experiment.log_metric(f'epoch__{n}', v)
if n == 'time': self.experiment.log_text(f'epoch__{n}', str(v))
# log model weights
if self.log_model_weights and hasattr(self.learn, 'save_model'):
if self.learn.save_model.every_epoch:
_file = join_path_file(f'{self.learn.save_model.fname}_{self.learn.save_model.epoch}',
self.learn.path / self.learn.model_dir, ext='.pth')
else:
_file = join_path_file(self.learn.save_model.fname,
self.learn.path / self.learn.model_dir, ext='.pth')
self.experiment.log_artifact(_file)
def after_fit(self):
if not self.keep_experiment_running:
try: self.experiment.stop()
except: print('No neptune experiment to stop.')
else:
print(f'Your experiment (id: {self.experiment.id}, name: {self.experiment.name}) is left in the running state.\n',
                  'You can log more data to it, like this: `neptune.log_metric()`')
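# Hedged usage sketch (not part of the generated module): the minimal setup
# this callback expects, per the checks in __init__ and before_fit. The
# project name and learner line are illustrative placeholders.
#
#   import neptune
#   neptune.init('USERNAME/PROJECT_NAME')
#   neptune.create_experiment()
#   learn = cnn_learner(dls, resnet18, cbs=NeptuneCallback())
#   learn.fit_one_cycle(1)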
| fastai/callback/neptune.py | 3,647 | ... | 246 | en | 0.799035
'''
Module:
Set regular or irregular axis ticks for a plot.
'''
from module_utility import *
import numpy as np
import matplotlib.pyplot as plt
# ticks : contains irregular ticks locations
# tickbeg : regular major ticks begin location
# tickend : regular major ticks end location
# tickd : regular major ticks interval
# mtick : number of minor tick intervals betwen two major ticks
# xbeg : axis begin location
# xend : axis end location
# ns : number of points to plot
# d : interval between two points
# axislen : apparent axis length
def define_tick(ticks, tickbeg, tickend, tickd, mtick, xbeg, xend, ns, d, axislen, format, extend=False):
# regular ticks
if ticks is None:
# major tick interval
if tickd is None:
tick_interval = nice((xend - xbeg) / 5.0)
if tick_interval == 0:
tick_interval = 1.0e10
else:
tick_interval = float(tickd)
# tick begin location
if tickbeg is None:
tick_beg = nice(xbeg)
base = 0.5
nb = 0
if tick_interval > 0:
while nb <= 10 and tick_beg > xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
while nb <= 10 and tick_beg < xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
tick_beg = float(tickbeg)
# tick end location
if tickend is None:
tick_end = tick_beg + (round((xend - xbeg) / tick_interval) + 2) * tick_interval
if tick_interval > 0:
while tick_end < xend:
tick_end = tick_end + abs(tick_interval)
else:
while tick_end > xend:
tick_end = tick_end - abs(tick_interval)
else:
tick_end = float(tickend)
# regular major and minor tick locations
tick = np.arange(tick_beg, tick_end + 0.1 * abs(tick_interval), tick_interval)
minor_tick_interval = tick_interval / (mtick + 1.0)
minor_tick = np.arange(tick_beg, tick_end + 0.1 * abs(minor_tick_interval), minor_tick_interval)
        # some ticks might fall outside the axis range; drop them unless extend is set
if not extend:
if d > 0:
tick = np.asarray([i for i in tick if i >= xbeg and i <= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i >= xbeg and i <= xend and (not i in tick)])
if d < 0:
tick = np.asarray([i for i in tick if i <= xbeg and i >= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i <= xbeg and i >= xend and (not i in tick)])
# linearly scale the ticks to figure canvas
if ns == 1:
# if only one sample point, then tick location is 0.5
tick_location = np.asarray([0.5])
ntick = 1
else:
# if multiple sample points, then scale to apparent axis length
tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in tick]
minor_tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in minor_tick]
t = tick_location
# set major tick location and labels, note some major ticks might be out of axis range
tl = []
tick_label = []
for i in range(0, len(tick)):
if extend or ((not extend) and tick_location[i] >= 0 and tick_location[i] <= axislen + 1.0e-10):
tl.append(tick_location[i])
if format == 'sci' or format == 'plain':
tick_label.append(('%f' % tick[i]).rstrip('0').rstrip('.'))
else:
tick_label.append((format % tick[i]))
tick_location = tl
# irregular ticks
else:
# get contents from user-specified ticks
ticks = ticks[0].split(',')
location = [0 for i in range(0, len(ticks))]
label = ['' for i in range(0, len(ticks))]
# set tick locations
for i in range(0, len(ticks)):
t = ticks[i].split(':')
location[i] = (float(t[0]) + 0.5 * d) / ((ns - 1) * d) * axislen
label[i] = t[1]
# sort according to tick location
yx = list(zip(location, label))
yx.sort()
tick_location = [location for location, label in yx]
tick_label = [label for location, label in yx]
# minor ticks
if mtick != 0:
mtick = mtick + 1
minor_tick_location = np.linspace(tick_location[0], tick_location[1], mtick + 1)
minor_tick_location = minor_tick_location[1:mtick]
for i in range(1, len(tick_location) - 1):
t = np.linspace(tick_location[i], tick_location[i + 1], mtick + 1)
minor_tick_location = np.append(minor_tick_location, t[1:mtick])
else:
minor_tick_location = []
# return major tick location, major tick label and minor tick location
return tick_location, tick_label, minor_tick_location
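# Hedged illustration (not part of the original module): for a 0-to-1 axis
# sampled at 11 points with spacing 0.1 drawn over an 8-unit axis, regular
# ticks with one minor interval per major tick can be requested as
#   locs, labels, minor = define_tick(None, None, None, None, 1,
#                                     0.0, 1.0, 11, 0.1, 8.0, 'plain')
# while the irregular form takes explicit "position:label" pairs, e.g.
#   define_tick(['0:zero,0.5:mid,1.0:end'], None, None, None, 0,
#               0.0, 1.0, 11, 0.1, 8.0, 'plain')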
def set_tick(args,
font,
x1beg,
x1end,
n1beg,
n1end,
d1,
axis1len,
x2beg,
x2end,
n2beg,
n2end,
d2,
axis2len,
extend=False):
ax = plt.gca()
label_1_size = float(args.label1size)
label_2_size = float(args.label2size)
xlabel = ax.set_xlabel(args.label2, fontsize=label_2_size, labelpad=float(args.label2pad)*72*2)
ylabel = ax.set_ylabel(args.label1, fontsize=label_1_size, labelpad=float(args.label1pad)*72*2)
l = ax.yaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_1_size)
l = ax.xaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_2_size)
if args.label2loc is not None:
ax.xaxis.set_label_position(args.label2loc)
else:
if args.ticktop:
ax.xaxis.set_label_position('top')
else:
ax.xaxis.set_label_position('bottom')
if args.label1loc is not None:
ax.yaxis.set_label_position(args.label1loc)
else:
if args.tickleft:
ax.yaxis.set_label_position('left')
else:
ax.yaxis.set_label_position('right')
ylabel.set_rotation(270)
# ticks on/off
ax.get_yaxis().set_tick_params(which='both', direction='out')
ax.get_xaxis().set_tick_params(which='both', direction='out')
plt.tick_params(
axis='x', # changes apply to the x1-axis
which='both', # both major and minor ticks are affected
bottom=args.tickbottom, # ticks along the bottom axis
top=args.ticktop, # ticks along the top axis
labelbottom=args.tickbottom, # labels along the bottom axis
labeltop=args.ticktop) # labels along the top axis
plt.tick_params(
axis='y', # changes apply to the x2-axis
which='both', # both major and minor ticks are affected
left=args.tickleft, # ticks along the left axis
right=args.tickright, # ticks along the right axis
labelleft=args.tickleft, # labels along the left axis
labelright=args.tickright) # labels along the right axis
    # if tick font size and family are not specified, then inherit from axis labels
if args.tick1size is None:
tick_1_font_size = label_1_size - 2
else:
tick_1_font_size = float(args.tick1size)
if args.tick2size is None:
tick_2_font_size = label_2_size - 2
else:
tick_2_font_size = float(args.tick2size)
# axis 1
tick_1_location, tick_1_label, tick_1_minor = define_tick(args.ticks1, args.tick1beg, args.tick1end,
args.tick1d, args.mtick1, x1beg, x1end,
n1end - n1beg + 1, d1, axis1len,
args.tick1format, extend)
plt.yticks(tick_1_location, tick_1_label, fontsize=tick_1_font_size, rotation=float(args.tick1rot))
if not args.tick1label:
ax.yaxis.set_ticklabels([])
# axis 2
tick_2_location, tick_2_label, tick_2_minor = define_tick(args.ticks2, args.tick2beg, args.tick2end,
args.tick2d, args.mtick2, x2beg, x2end,
n2end - n2beg + 1, d2, axis2len,
args.tick2format, extend)
plt.xticks(tick_2_location, tick_2_label, fontsize=tick_2_font_size, rotation=float(args.tick2rot))
if not args.tick2label:
ax.xaxis.set_ticklabels([])
    # major ticks style
ax.tick_params('both', length=float(args.tickmajorlen), width=float(args.tickmajorwid), which='major')
# minor tick positions
ax.set_yticks(tick_1_minor, minor=True)
ax.set_xticks(tick_2_minor, minor=True)
# minor ticks style
if args.tickminorlen is None:
tick_minor_length = 0.5 * float(args.tickmajorlen)
else:
tick_minor_length = float(args.tickminorlen)
if args.tickminorwid is None:
tick_minor_width = 0.75 * float(args.tickmajorwid)
else:
tick_minor_width = float(args.tickminorwid)
ax.tick_params('both', length=tick_minor_length, width=tick_minor_width, which='minor')
for l in ax.yaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_1_font_size)
for l in ax.xaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_2_font_size)
# make tick labels rigid
def rigid_tick_label(tick_label):
ndec = 0
for i in tick_label:
dec = i.split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll > ndec:
ndec = ll
for i in range(0, len(tick_label)):
dec = tick_label[i].split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll < ndec:
for k in range(0, ndec - ll):
tick_label[i] = tick_label[i] + '0'
if len(dec) == 1 and ndec != 0:
tick_label[i] = tick_label[i] + '.'
for k in range(0, ndec):
tick_label[i] = tick_label[i] + '0'
return tick_label
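# Worked example (annotation, not original code): rigid_tick_label pads all
# labels to the largest decimal count so tick text stays aligned, e.g.
#   rigid_tick_label(['1', '1.5', '2.25'])  ->  ['1.00', '1.50', '2.25']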
| src/module_tick.py | 10,568 | ... | 1,589 | en | 0.840243
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License 2.0;
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""
# __author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import gdata.data
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells'
'/%s/%s/private/full')
BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s'
BATCH_EDIT_LINK_TEMPLATE = '%s/batch'
class Error(Exception):
pass
class FieldMissing(Exception):
pass
class HeaderNotSet(Error):
"""The desired column header had no value for the row in the list feed."""
class Cell(atom.core.XmlElement):
"""The gs:cell element.
A cell in the worksheet. The <gs:cell> element can appear only as a child
of <atom:entry>.
"""
_qname = GS_TEMPLATE % 'cell'
col = 'col'
input_value = 'inputValue'
numeric_value = 'numericValue'
row = 'row'
class ColCount(atom.core.XmlElement):
"""The gs:colCount element.
Indicates the number of columns in the worksheet, including columns that
contain only empty cells. The <gs:colCount> element can appear as a child
of <atom:entry> or <atom:feed>
"""
_qname = GS_TEMPLATE % 'colCount'
class Field(atom.core.XmlElement):
"""The gs:field element.
A single-cell field within a record. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'field'
index = 'index'
name = 'name'
class Column(Field):
"""The gs:column element."""
_qname = GS_TEMPLATE % 'column'
class Data(atom.core.XmlElement):
"""The gs:data element.
A data region of a table. Contained in an <atom:entry> element.
"""
_qname = GS_TEMPLATE % 'data'
column = [Column]
insertion_mode = 'insertionMode'
num_rows = 'numRows'
start_row = 'startRow'
class Header(atom.core.XmlElement):
"""The gs:header element.
Indicates which row is the header row. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'header'
row = 'row'
class RowCount(atom.core.XmlElement):
"""The gs:rowCount element.
Indicates the number of total rows in the worksheet, including rows that
contain only empty cells. The <gs:rowCount> element can appear as a
child of <atom:entry> or <atom:feed>.
"""
_qname = GS_TEMPLATE % 'rowCount'
class Worksheet(atom.core.XmlElement):
"""The gs:worksheet element.
The worksheet where the table lives. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'worksheet'
name = 'name'
class Spreadsheet(gdata.data.GDEntry):
"""An Atom entry which represents a Google Spreadsheet."""
def find_worksheets_feed(self):
return self.find_url(WORKSHEETS_REL)
FindWorksheetsFeed = find_worksheets_feed
def get_spreadsheet_key(self):
"""Extracts the spreadsheet key unique to this spreadsheet."""
return self.get_id().split('/')[-1]
GetSpreadsheetKey = get_spreadsheet_key
class SpreadsheetsFeed(gdata.data.GDFeed):
"""An Atom feed listing a user's Google Spreadsheets."""
entry = [Spreadsheet]
class WorksheetEntry(gdata.data.GDEntry):
"""An Atom entry representing a single worksheet in a spreadsheet."""
row_count = RowCount
col_count = ColCount
def get_worksheet_id(self):
"""The worksheet ID identifies this worksheet in its spreadsheet."""
return self.get_id().split('/')[-1]
GetWorksheetId = get_worksheet_id
class WorksheetsFeed(gdata.data.GDFeed):
"""A feed containing the worksheets in a single spreadsheet."""
entry = [WorksheetEntry]
class Table(gdata.data.GDEntry):
"""An Atom entry that represents a subsection of a worksheet.
A table allows you to treat part or all of a worksheet somewhat like a
table in a database, that is, as a set of structured data items. Tables
don't exist until you explicitly create them; before you can use a table
feed, you have to explicitly define where the table data comes from.
"""
data = Data
header = Header
worksheet = Worksheet
def get_table_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
GetTableId = get_table_id
class TablesFeed(gdata.data.GDFeed):
"""An Atom feed containing the tables defined within a worksheet."""
entry = [Table]
class Record(gdata.data.GDEntry):
"""An Atom entry representing a single record in a table.
Note that the order of items in each record is the same as the order of
columns in the table definition, which may not match the order of
columns in the GUI.
"""
field = [Field]
def value_for_index(self, column_index):
for field in self.field:
if field.index == column_index:
return field.text
raise FieldMissing('There is no field for %s' % column_index)
ValueForIndex = value_for_index
def value_for_name(self, name):
for field in self.field:
if field.name == name:
return field.text
raise FieldMissing('There is no field for %s' % name)
ValueForName = value_for_name
def get_record_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
class RecordsFeed(gdata.data.GDFeed):
"""An Atom feed containing the individuals records in a table."""
entry = [Record]
class ListRow(atom.core.XmlElement):
"""A gsx column value within a row.
The local tag in the _qname is blank and must be set to the column
name. For example, when adding to a ListEntry, do:
col_value = ListRow(text='something')
col_value._qname = col_value._qname % 'mycolumnname'
"""
_qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'
class ListEntry(gdata.data.GDEntry):
"""An Atom entry representing a worksheet row in the list feed.
The values for a particular column can be get and set using
x.get_value('columnheader') and x.set_value('columnheader', 'value').
See also the explanation of column names in the ListFeed class.
"""
def get_value(self, column_name):
"""Returns the displayed text for the desired column in this row.
The formula or input which generated the displayed value is not accessible
through the list feed; to see the user's input, use the cells feed.
If a column is not present in this spreadsheet, or there is no value
for a column in this row, this method will return None.
"""
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) == 0:
return None
return values[0].text
def set_value(self, column_name, value):
"""Changes the value of cell in this row under the desired column name.
Warning: if the cell contained a formula, it will be wiped out by setting
the value using the list feed since the list feed only works with
displayed values.
No client side checking is performed on the column_name, you need to
ensure that the column_name is the local tag name in the gsx tag for the
column. For example, the column_name will not contain special characters,
spaces, uppercase letters, etc.
"""
# Try to find the column in this row to change an existing value.
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) > 0:
values[0].text = value
else:
# There is no value in this row for the desired column, so add a new
# gsx:column_name element.
new_value = ListRow(text=value)
new_value._qname = new_value._qname % (column_name,)
self._other_elements.append(new_value)
def to_dict(self):
"""Converts this row to a mapping of column names to their values."""
result = {}
values = self.get_elements(namespace=GSX_NAMESPACE)
for item in values:
result[item._get_tag()] = item.text
return result
def from_dict(self, values):
"""Sets values for this row from the dictionary.
Old values which are already in the entry will not be removed unless
they are overwritten with new values from the dict.
"""
for column, value in values.items():
self.set_value(column, value)
class ListsFeed(gdata.data.GDFeed):
"""An Atom feed in which each entry represents a row in a worksheet.
The first row in the worksheet is used as the column names for the values
in each row. If a header cell is empty, then a unique column ID is used
for the gsx element name.
Spaces in a column name are removed from the name of the corresponding
gsx element.
Caution: The columnNames are case-insensitive. For example, if you see
a <gsx:e-mail> element in a feed, you can't know whether the column
heading in the original worksheet was "e-mail" or "E-Mail".
Note: If two or more columns have the same name, then subsequent columns
of the same name have _n appended to the columnName. For example, if the
first column name is "e-mail", followed by columns named "E-Mail" and
"E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
gsx:e-mail_3 respectively.
"""
entry = [ListEntry]
class CellEntry(gdata.data.BatchEntry):
"""An Atom entry representing a single cell in a worksheet."""
cell = Cell
class CellsFeed(gdata.data.BatchFeed):
"""An Atom feed contains one entry per cell in a worksheet.
The cell feed supports batch operations, you can send multiple cell
operations in one HTTP request.
"""
entry = [CellEntry]
def add_set_cell(self, row, col, input_value):
"""Adds a request to change the contents of a cell to this batch request.
Args:
row: int, The row number for this cell. Numbering starts at 1.
col: int, The column number for this cell. Starts at 1.
input_value: str, The desired formula/content this cell should contain.
"""
self.add_update(CellEntry(
id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % (
self.id.text, row, col)),
cell=Cell(col=str(col), row=str(row), input_value=input_value)))
return self
AddSetCell = add_set_cell
def build_batch_cells_update(spreadsheet_key, worksheet_id):
"""Creates an empty cells feed for adding batch cell updates to.
Call add_set_cell on the resulting CellsFeed instance, then send the batch
request. TODO: fill in
Args:
spreadsheet_key: The ID of the spreadsheet
worksheet_id: The ID of the worksheet within the spreadsheet
"""
feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id)
return CellsFeed(
id=atom.data.Id(text=feed_id_text),
link=[atom.data.Link(
rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))])
BuildBatchCellsUpdate = build_batch_cells_update
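# Example usage (a minimal sketch; the key/ID values are placeholders, and
# sending the feed as a batch request is left to the surrounding gdata client):
#
#     feed = build_batch_cells_update('spreadsheet_key', 'worksheet_id')
#     feed.add_set_cell(1, 1, 'Name')
#     feed.add_set_cell(1, 2, 'Score')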
| src/gdata/spreadsheets/data.py | 11,530 | The gs:cell element.
A cell in the worksheet. The <gs:cell> element can appear only as a child
of <atom:entry>.
An Atom entry representing a single cell in a worksheet.
An Atom feed containing one entry per cell in a worksheet.
The cell feed supports batch operations: you can send multiple cell
operations in one HTTP request.
The gs:colCount element.
Indicates the number of columns in the worksheet, including columns that
contain only empty cells. The <gs:colCount> element can appear as a child
of <atom:entry> or <atom:feed>
The gs:column element.
The gs:data element.
A data region of a table. Contained in an <atom:entry> element.
The gs:field element.
A single-cell field within a record. Contained in an <atom:entry>.
The gs:header element.
Indicates which row is the header row. Contained in an <atom:entry>.
The desired column header had no value for the row in the list feed.
An Atom entry representing a worksheet row in the list feed.
The values for a particular column can be get and set using
x.get_value('columnheader') and x.set_value('columnheader', 'value').
See also the explanation of column names in the ListFeed class.
A gsx column value within a row.
The local tag in the _qname is blank and must be set to the column
name. For example, when adding to a ListEntry, do:
col_value = ListRow(text='something')
col_value._qname = col_value._qname % 'mycolumnname'
An Atom feed in which each entry represents a row in a worksheet.
The first row in the worksheet is used as the column names for the values
in each row. If a header cell is empty, then a unique column ID is used
for the gsx element name.
Spaces in a column name are removed from the name of the corresponding
gsx element.
Caution: The columnNames are case-insensitive. For example, if you see
a <gsx:e-mail> element in a feed, you can't know whether the column
heading in the original worksheet was "e-mail" or "E-Mail".
Note: If two or more columns have the same name, then subsequent columns
of the same name have _n appended to the columnName. For example, if the
first column name is "e-mail", followed by columns named "E-Mail" and
"E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
gsx:e-mail_3 respectively.
An Atom entry representing a single record in a table.
Note that the order of items in each record is the same as the order of
columns in the table definition, which may not match the order of
columns in the GUI.
An Atom feed containing the individual records in a table.
The gs:rowCount element.
Indicates the number of total rows in the worksheet, including rows that
contain only empty cells. The <gs:rowCount> element can appear as a
child of <atom:entry> or <atom:feed>.
An Atom entry which represents a Google Spreadsheet.
An Atom feed listing a user's Google Spreadsheets.
An Atom entry that represents a subsection of a worksheet.
A table allows you to treat part or all of a worksheet somewhat like a
table in a database, that is, as a set of structured data items. Tables
don't exist until you explicitly create them; before you can use a table
feed, you have to explicitly define where the table data comes from.
An Atom feed containing the tables defined within a worksheet.
The gs:worksheet element.
The worksheet where the table lives. Contained in an <atom:entry>.
An Atom entry representing a single worksheet in a spreadsheet.
A feed containing the worksheets in a single spreadsheet.
Adds a request to change the contents of a cell to this batch request.
Args:
row: int, The row number for this cell. Numbering starts at 1.
col: int, The column number for this cell. Starts at 1.
input_value: str, The desired formula/content this cell should contain.
Creates an empty cells feed for adding batch cell updates to.
Call add_set_cell on the resulting CellsFeed instance, then send the batch
request TODO: fill in
Args:
spreadsheet_key: The ID of the spreadsheet
worksheet_id: The ID of the worksheet within the spreadsheet
Sets values for this row from the dictionary.
Old values which are already in the entry will not be removed unless
they are overwritten with new values from the dict.
Extracts the spreadsheet key unique to this spreadsheet.
Returns the displayed text for the desired column in this row.
The formula or input which generated the displayed value is not accessible
through the list feed; to see the user's input, use the cells feed.
If a column is not present in this spreadsheet, or there is no value
for a column in this row, this method will return None.
The worksheet ID identifies this worksheet in its spreadsheet.
Changes the value of cell in this row under the desired column name.
Warning: if the cell contained a formula, it will be wiped out by setting
the value using the list feed since the list feed only works with
displayed values.
No client side checking is performed on the column_name, you need to
ensure that the column_name is the local tag name in the gsx tag for the
column. For example, the column_name will not contain special characters,
spaces, uppercase letters, etc.
Converts this row to a mapping of column names to their values.
Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
!/usr/bin/env python Copyright (C) 2009 Google Inc. Licensed under the Apache License 2.0; This module is used for version 2 of the Google Data APIs. __author__ = 'j.s@google.com (Jeff Scudder)' Try to find the column in this row to change an existing value. There is no value in this row for the desired column, so add a new gsx:column_name element. | 5,679 | en | 0.789845 |
import numpy as np
import pandas as pd
from openpyxl import load_workbook
import sys
def print_array_to_excel(array, first_cell, ws, axis=2):
'''
Print an np array to excel using openpyxl
:param array: np array
:param first_cell: first cell to start dumping values in
:param ws: worksheet reference. From openpyxl, ws=wb[sheetname]
:param axis: to determine if the array is a col vector (0), row vector (1), or 2d matrix (2)
'''
if isinstance(array, (list,)):
array = np.array(array)
shape = array.shape
if axis == 0:
# Treat array as col vector and print along the rows
        array = array.flatten()  # flatten (returns a copy) in case the input is an nx1 ndarray, which acts weird
for i in range(shape[0]):
j = 0
ws.cell(i + first_cell[0], j + first_cell[1]).value = array[i]
elif axis == 1:
# Treat array as row vector and print along the columns
        array = array.flatten()  # flatten (returns a copy) in case the input is a 1xn ndarray, which acts weird
for j in range(shape[0]):
i = 0
ws.cell(i + first_cell[0], j + first_cell[1]).value = array[j]
elif axis == 2:
# If axis==2, means it is a 2d array
for i in range(shape[0]):
for j in range(shape[1]):
ws.cell(i + first_cell[0], j + first_cell[1]).value = array[i, j]
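# Example usage (a minimal sketch; the output filename is a placeholder):
#
#     from openpyxl import Workbook
#     wb = Workbook()
#     ws = wb.active
#     print_array_to_excel(np.arange(6).reshape(2, 3), (1, 1), ws, axis=2)
#     wb.save('output.xlsx')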
if __name__ == '__main__':
print('hi') | gold nanocluster synthesis/own_package/others.py | 1,413 | Print an np array to excel using openpyxl
:param array: np array
:param first_cell: first cell to start dumping values in
:param ws: worksheet reference. From openpyxl, ws=wb[sheetname]
:param axis: to determine if the array is a col vector (0), row vector (1), or 2d matrix (2)
Treat array as col vector and print along the rows Flatten in case the input array is an nx1 ndarray which acts weird Treat array as row vector and print along the columns Flatten in case the input array is a 1xn ndarray which acts weird If axis==2, means it is a 2d array | 550 | en | 0.789732 |
# -*- coding: utf-8 -*-
# URL : https://leetcode-cn.com/problems/median-of-two-sorted-arrays/
""""""
"""
problem:
给定两个大小为 m 和 n 的有序数组 nums1 和 nums2。
请你找出这两个有序数组的中位数,并且要求算法的时间复杂度为 O(log(m + n))。
你可以假设 nums1 和 nums2 不会同时为空。
示例 1:
nums1 = [1, 3]
nums2 = [2]
则中位数是 2.0
示例 2:
nums1 = [1, 2]
nums2 = [3, 4]
则中位数是 (2 + 3)/2 = 2.5
"""
"""
explain:
看清楚,复杂度是 O(log(m + n)),而不是 O(m + n),所以不能合并这两个数组,要原封不动,用下标去访问找出中位数。
中位数就是排序数组序列的中间位置的元素,奇数个元素取一个中间元素,偶数个元素取中间两个元素求平均。
要寻找的两个元素(非下标):(m + n + 1) / 2,(m + n + 2) / 2,当元素个数为奇数个时,这两个值是相等的,因此可以寻找这两个位置的元素出来求平均。
题目转变成找出第 k 个的元素,这里的 k 就是上面那两个。
这两个数组,是各自有序,要找这两个的元素,就需要进行比较淘汰。
找第 k 个元素的过程:
取出各自下标为 k / 2 - 1 的元素,也就是中间元素,这里就可以使得复杂度为 log 级别。
如果 nums1 < nums2,就表明 nums1 前面 k / 2 不可能有合并之后的 k,可以淘汰 nums1 的前 k / 2 个元素;
如果 nums1 > nums2,也表明 nums2 前面 k / 2 可以淘汰。
淘汰之后,k 变为 k - k / 2。
另外,k == 1 时,就不存在 k / 2(中间元素),此时比较 nums1、nums2 当前索引值的大小,取小的那一个,因为这里是取第 1(k) 个元素。
当索引值超出对应的 nums 长度时,表明 k 在另一个数组中,可以返回下标为 (索引值 + k - 1) 的元素,其中(k - 1)就是取下标。
演示:
nums1 = [1, 2, 3]
nums2 = [4, 5, 6]
根据 (m + n + 1) / 2,(m + n + 2) / 2,需要找出第 3,4 这两个元素,求平均值
初始索引值:index1 = index2 = 0
找 k == 3 的过程:
1. 根据 k / 2 - 1,各自取出下标为 0 的元素,分别是 1 和 4;由于 1 < 4,所以淘汰 nums1 中的前 k / 2 个元素,即 index1(索引值)为 1。
2. 根据 k - k / 2,k 变更为 2。
3. 变成寻找 k == 2 的过程,重复 1、2 步骤。
4. 各自取出下标为 0 的元素(叠加索引值),分别是 2 和 4;由于 2 < 4,所以 nums1 只剩下 3 这个元素,即 index1 == 2。
5. k 变更为 1。
6. 比较 nums1、nums2 当前索引值的大小,取小的那一个,即 3 和 4,取元素 3。
找 k == 4 的过程:
1. 根据 k / 2 - 1,各自取出下标为 1 的元素,分别是 2 和 5;由于 2 < 5,所以淘汰 nums1 中的前 k / 2 个元素,即 index1(索引值)为 2。
2. 根据 k - k / 2,k 变更为 2。
3. 变成寻找 k == 2 的过程,重复 1、2 步骤。
4. 各自取出下标为 0 的元素(叠加索引值),分别是 3 和 4;由于 3 < 4,所以 index1 == 3。
5. k 变更为 1。
6. 判断 index1 >= nums1.length,即 nums1 全部淘汰,取 nums2 中下标为 (index2 + k - 1)的元素,即元素 4。
平均值(中位数):
(3 + 4) / 2 = 3.5
"""
"""
out:
执行用时 : 88 ms, 在所有 python 提交中击败了 63.81% 的用户
内存消耗 : 11.8 MB, 在所有 python 提交中击败了 32.58% 的用户
"""
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
m = len(nums1)
n = len(nums2)
def find_kth(nums1, nums2, index1, index2, k):
            # index range check: if one array is exhausted, the answer is in the other
if index1 >= len(nums1):
return nums2[index2 + k - 1]
if index2 >= len(nums2):
return nums1[index1 + k - 1]
# k == 1
if k == 1:
return nums1[index1] if nums1[index1] < nums2[index2] else nums2[index2]
            # compare the middle values and discard
do_discard_nums1 = True
mid = k // 2 - 1
if index1 + mid >= len(nums1) or (
index2 + mid < len(nums2) and nums1[index1 + mid] > nums2[index2 + mid]
):
do_discard_nums1 = False
mid += 1
if do_discard_nums1:
                # discard the elements of nums1 before mid
return find_kth(nums1, nums2, index1 + mid, index2, k - mid)
else:
return find_kth(nums1, nums2, index1, index2 + mid, k - mid)
return (
find_kth(nums1, nums2, 0, 0, (m + n + 1) // 2)
+ find_kth(nums1, nums2, 0, 0, (m + n + 2) // 2)
) / 2.0
if __name__ == "__main__":
solution = Solution()
assert solution.findMedianSortedArrays([1, 3], [2]) == 2.0
assert solution.findMedianSortedArrays([2], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2], [3, 4]) == 2.5
assert solution.findMedianSortedArrays([1, 3], [2, 4]) == 2.5
assert solution.findMedianSortedArrays([], [1]) == 1.0
assert solution.findMedianSortedArrays([1], []) == 1.0
assert solution.findMedianSortedArrays([1, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 2, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3, 5], [4, 6, 7, 8, 9]) == 5.0
assert solution.findMedianSortedArrays([1], [2, 3, 4, 5, 6]) == 3.5
| Codes/xiaohong2019/leetcode/4_median_of_two_sorted_arrays.py | 5,630 | :type nums1: List[int]
:type nums2: List[int]
:rtype: float
-*- coding: utf-8 -*- URL : https://leetcode-cn.com/problems/median-of-two-sorted-arrays/ index range check k == 1 compare the middle values and discard discard the elements of nums1 before mid | 196 | zh | 0.451078 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 14:08
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0011_auto_20170727_1324'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='partner_subtitle',
field=wagtail.core.fields.RichTextField(blank=True),
),
migrations.AddField(
model_name='homepage',
name='partner_title',
field=wagtail.core.fields.RichTextField(blank=True),
),
]
| django-website/home/migrations/0012_auto_20170727_1408.py | 665 | -*- coding: utf-8 -*- Generated by Django 1.11.3 on 2017-07-27 14:08 | 68 | en | 0.626203 |
import disnake
from disnake.ext import commands
# Define a simple View that persists between bot restarts
# In order a view to persist between restarts it needs to meet the following conditions:
# 1) The timeout of the View has to be set to None
# 2) Every item in the View has to have a custom_id set
# It is recommended that the custom_id be sufficiently unique to
# prevent conflicts with other buttons the bot sends.
# For this example the custom_id is prefixed with the name of the bot.
# Note that custom_ids can only be up to 100 characters long.
class PersistentView(disnake.ui.View):
def __init__(self):
super().__init__(timeout=None)
@disnake.ui.button(
label="Green", style=disnake.ButtonStyle.green, custom_id="persistent_view:green"
)
async def green(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is green.", ephemeral=True)
@disnake.ui.button(label="Red", style=disnake.ButtonStyle.red, custom_id="persistent_view:red")
async def red(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is red.", ephemeral=True)
@disnake.ui.button(
label="Grey", style=disnake.ButtonStyle.grey, custom_id="persistent_view:grey"
)
async def grey(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is grey.", ephemeral=True)
class PersistentViewBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned)
self.persistent_views_added = False
async def on_ready(self):
if not self.persistent_views_added:
# Register the persistent view for listening here.
# Note that this does not send the view to any message.
# In order to do this you need to first send a message with the View, which is shown below.
# If you have the message_id you can also pass it as a keyword argument, but for this example
# we don't have one.
self.add_view(PersistentView())
self.persistent_views_added = True
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
bot = PersistentViewBot()
@bot.command()
@commands.is_owner()
async def prepare(ctx: commands.Context):
"""Starts a persistent view."""
# In order for a persistent view to be listened to, it needs to be sent to an actual message.
# Call this method once just to store it somewhere.
# In a more complicated program you might fetch the message_id from a database for use later.
# However this is outside of the scope of this simple example.
await ctx.send("What's your favourite colour?", view=PersistentView())
bot.run("token")
| examples/views/persistent.py | 2,883 | Define a simple View that persists between bot restarts In order a view to persist between restarts it needs to meet the following conditions: 1) The timeout of the View has to be set to None 2) Every item in the View has to have a custom_id set It is recommended that the custom_id be sufficiently unique to prevent conflicts with other buttons the bot sends. For this example the custom_id is prefixed with the name of the bot. Note that custom_ids can only be up to 100 characters long. Register the persistent view for listening here. Note that this does not send the view to any message. In order to do this you need to first send a message with the View, which is shown below. If you have the message_id you can also pass it as a keyword argument, but for this example we don't have one. In order for a persistent view to be listened to, it needs to be sent to an actual message. Call this method once just to store it somewhere. In a more complicated program you might fetch the message_id from a database for use later. However this is outside of the scope of this simple example. | 1,088 | en | 0.944938 |
import glob
from itertools import chain
from os import path
import numpy as np
import torch.utils.data as data
import umsgpack
from PIL import Image
class ISSDataset(data.Dataset):
"""Instance segmentation dataset
This assumes the dataset to be formatted as defined in:
https://github.com/mapillary/seamseg/wiki/Dataset-format
Parameters
----------
root_dir : str
Path to the root directory of the dataset
split_name : str
Name of the split to load: this must correspond to one of the files in `root_dir/lst`
transform : callable
Transformer function applied to the loaded entries to prepare them for pytorch. This should be callable as
`transform(img, msk, cat, cls)`, where:
- `img` is a PIL.Image with `mode="RGB"`, containing the RGB data
- `msk` is a list of PIL.Image with `mode="L"`, containing the instance segmentation masks
- `cat` is a list containing the instance id to class id mapping
- `cls` is an integer specifying a requested class for class-uniform sampling, or None
"""
_IMG_DIR = "img"
_MSK_DIR = "msk"
_LST_DIR = "lst"
_METADATA_FILE = "metadata.bin"
def __init__(self, root_dir, split_name, transform):
super(ISSDataset, self).__init__()
self.root_dir = root_dir
self.split_name = split_name
self.transform = transform
# Folders
self._img_dir = path.join(root_dir, ISSDataset._IMG_DIR)
self._msk_dir = path.join(root_dir, ISSDataset._MSK_DIR)
self._lst_dir = path.join(root_dir, ISSDataset._LST_DIR)
for d in self._img_dir, self._msk_dir, self._lst_dir:
if not path.isdir(d):
raise IOError("Dataset sub-folder {} does not exist".format(d))
# Load meta-data and split
self._meta, self._images = self._load_split()
def _load_split(self):
with open(path.join(self.root_dir, ISSDataset._METADATA_FILE), "rb") as fid:
metadata = umsgpack.unpack(fid, encoding="utf-8")
with open(path.join(self._lst_dir, self.split_name + ".txt"), "r") as fid:
lst = fid.readlines()
lst = set(line.strip() for line in lst)
meta = metadata["meta"]
images = [img_desc for img_desc in metadata["images"] if img_desc["id"] in lst]
return meta, images
def _load_item(self, item):
img_desc = self._images[item]
img_file = path.join(self._img_dir, img_desc["id"])
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(img_desc["id"], self._img_dir))
img = Image.open(img_file).convert(mode="RGB")
# Load all masks
msk_file = path.join(self._msk_dir, img_desc["id"] + ".png")
msk = [Image.open(msk_file)]
i = 1
while path.exists("{}.{}".format(msk_file, i)):
msk.append(Image.open("{}.{}".format(msk_file, i)))
i += 1
cat = img_desc["cat"]
iscrowd = img_desc["iscrowd"]
return img, msk, cat, iscrowd, img_desc["id"]
@property
def categories(self):
"""Category names"""
return self._meta["categories"]
@property
def num_categories(self):
"""Number of categories"""
return len(self.categories)
@property
def num_stuff(self):
"""Number of "stuff" categories"""
return self._meta["num_stuff"]
@property
def num_thing(self):
"""Number of "thing" categories"""
return self.num_categories - self.num_stuff
@property
def original_ids(self):
"""Original class id of each category"""
return self._meta["original_ids"]
@property
def palette(self):
"""Default palette to be used when color-coding semantic labels"""
return np.array(self._meta["palette"], dtype=np.uint8)
@property
def img_sizes(self):
"""Size of each image of the dataset"""
return [img_desc["size"] for img_desc in self._images]
@property
def img_categories(self):
"""Categories present in each image of the dataset"""
return [img_desc["cat"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
img, msk, cat, iscrowd, idx = self._load_item(item)
rec = self.transform(img, msk, cat, iscrowd)
size = (img.size[1], img.size[0])
img.close()
for m in msk:
m.close()
rec["idx"] = idx
rec["size"] = size
return rec
def get_raw_image(self, idx):
"""Load a single, unmodified image with given id from the dataset"""
img_file = path.join(self._img_dir, idx)
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(idx, self._img_dir))
return Image.open(img_file)
def get_image_desc(self, idx):
"""Look up an image descriptor given the id"""
matching = [img_desc for img_desc in self._images if img_desc["id"] == idx]
if len(matching) == 1:
return matching[0]
else:
raise ValueError("No image found with id %s" % idx)
class ISSTestDataset(data.Dataset):
_EXTENSIONS = ["*.jpg", "*.jpeg", "*.png"]
def __init__(self, in_dir, transform):
super(ISSTestDataset, self).__init__()
self.in_dir = in_dir
self.transform = transform
# Find all images
self._images = []
for img_path in chain(
*(glob.iglob(path.join(self.in_dir, '**', ext), recursive=True) for ext in ISSTestDataset._EXTENSIONS)):
_, name_with_ext = path.split(img_path)
idx, _ = path.splitext(name_with_ext)
with Image.open(img_path) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
self._images.append({
"idx": idx,
"path": img_path,
"size": size,
})
@property
def img_sizes(self):
"""Size of each image of the dataset"""
return [img_desc["size"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
# Load image
with Image.open(self._images[item]["path"]) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
img = self.transform(img_raw.convert(mode="RGB"))
return {
"img": img,
"idx": self._images[item]["idx"],
"size": size,
"abs_path": self._images[item]["path"],
"rel_path": path.relpath(self._images[item]["path"], self.in_dir),
}
| seamseg/data/dataset.py | 7,059 | Instance segmentation dataset
This assumes the dataset to be formatted as defined in:
https://github.com/mapillary/seamseg/wiki/Dataset-format
Parameters
----------
root_dir : str
Path to the root directory of the dataset
split_name : str
Name of the split to load: this must correspond to one of the files in `root_dir/lst`
transform : callable
Transformer function applied to the loaded entries to prepare them for pytorch. This should be callable as
`transform(img, msk, cat, cls)`, where:
- `img` is a PIL.Image with `mode="RGB"`, containing the RGB data
- `msk` is a list of PIL.Image with `mode="L"`, containing the instance segmentation masks
- `cat` is a list containing the instance id to class id mapping
- `cls` is an integer specifying a requested class for class-uniform sampling, or None
Category names
Look up an image descriptor given the id
Load a single, unmodified image with given id from the dataset
Categories present in each image of the dataset
Size of each image of the dataset
Size of each image of the dataset
Number of categories
Number of "stuff" categories
Number of "thing" categories
Original class id of each category
Default palette to be used when color-coding semantic labels
Folders Load meta-data and split Load all masks Find all images Load image | 1,342 | en | 0.691806 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class MonitorClientConfiguration(Configuration):
"""Configuration for MonitorClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
"""
def __init__(
self,
credential, # type: "TokenCredential"
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(MonitorClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.api_version = "2017-05-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-eventhub/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_05_01_preview/_configuration.py | 2,945 | Configuration for MonitorClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=unused-import,ungrouped-imports type: "TokenCredential" type: Any type: (...) -> None type: Any type: (...) -> None | 840 | en | 0.614617 |
# -*- test-case-name: vumi.transports.xmpp.tests.test_xmpp -*-
# -*- encoding: utf-8 -*-
from twisted.python import log
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from twisted.words.xish.domish import Element as DomishElement
from twisted.internet.task import LoopingCall
from twisted.internet.defer import inlineCallbacks
from wokkel.client import XMPPClient
from wokkel.ping import PingClientProtocol
from wokkel.xmppim import (RosterClientProtocol, MessageProtocol,
PresenceClientProtocol)
from vumi.transports.base import Transport
class TransportRosterClientProtocol(RosterClientProtocol):
def connectionInitialized(self):
# get the roster as soon as the connection's been initialized, this
# allows us to see who's online but more importantly, allows us to see
# who's added us to their roster. This allows us to auto subscribe to
# anyone, automatically adding them to our roster, skips the "user ...
# wants to add you to their roster, allow? yes/no" hoopla.
self.getRoster()
class TransportPresenceClientProtocol(PresenceClientProtocol):
"""
A custom presence protocol to automatically accept any subscription
attempt.
"""
def __init__(self, initialized_callback, *args, **kwargs):
super(TransportPresenceClientProtocol, self).__init__(*args, **kwargs)
self.initialized_callback = initialized_callback
def connectionInitialized(self):
super(TransportPresenceClientProtocol, self).connectionInitialized()
self.initialized_callback()
def subscribeReceived(self, entity):
self.subscribe(entity)
self.subscribed(entity)
def unsubscribeReceived(self, entity):
self.unsubscribe(entity)
self.unsubscribed(entity)
class XMPPTransportProtocol(MessageProtocol, object):
def __init__(self, jid, message_callback, connection_callback,
connection_lost_callback=None,):
super(MessageProtocol, self).__init__()
self.jid = jid
self.message_callback = message_callback
self.connection_callback = connection_callback
self.connection_lost_callback = connection_lost_callback
def reply(self, jid, content):
message = domish.Element((None, "message"))
        # intentionally leaving 'from' blank, leaving it to the XMPP server
        # to figure out
message['to'] = jid
message['type'] = 'chat'
message.addUniqueId()
message.addElement((None, 'body'), content=content)
self.xmlstream.send(message)
def onMessage(self, message):
"""Messages sent to the bot will arrive here. Command handling routing
is done in this function."""
if not isinstance(message.body, DomishElement):
return None
text = unicode(message.body).encode('utf-8').strip()
from_addr, _, _ = message['from'].partition('/')
self.message_callback(
to_addr=self.jid.userhost(),
from_addr=from_addr,
content=text,
transport_type='xmpp',
transport_metadata={
'xmpp_id': message.getAttribute('id'),
})
def connectionMade(self):
self.connection_callback()
return super(XMPPTransportProtocol, self).connectionMade()
def connectionLost(self, reason):
if self.connection_lost_callback is not None:
self.connection_lost_callback(reason)
log.msg("XMPP Connection lost.")
super(XMPPTransportProtocol, self).connectionLost(reason)
class XMPPTransport(Transport):
"""XMPP transport.
Configuration parameters:
:type host: str
:param host:
The host of the XMPP server to connect to.
:type port: int
:param port:
The port on the XMPP host to connect to.
:type debug: bool
:param debug:
Whether or not to show all the XMPP traffic. Defaults to False.
:type username: str
:param username:
The XMPP account username
:type password: str
:param password:
The XMPP account password
:type status: str
:param status:
The XMPP status 'away', 'xa', 'chat' or 'dnd'
:type status_message: str
:param status_message:
The natural language status message for this XMPP transport.
:type presence_interval: int
:param presence_interval:
How often (in seconds) to send a presence update to the roster.
:type ping_interval: int
:param ping_interval:
How often (in seconds) to send a keep-alive ping to the XMPP server
to keep the connection alive. Defaults to 60 seconds.
"""
start_message_consumer = False
_xmpp_protocol = XMPPTransportProtocol
_xmpp_client = XMPPClient
def __init__(self, options, config=None):
super(XMPPTransport, self).__init__(options, config=config)
self.ping_call = LoopingCall(self.send_ping)
self.presence_call = LoopingCall(self.send_presence)
def validate_config(self):
self.host = self.config['host']
self.port = int(self.config['port'])
self.debug = self.config.get('debug', False)
self.username = self.config['username']
self.password = self.config['password']
self.status = self.config['status']
self.status_message = self.config.get('status_message', '')
self.ping_interval = self.config.get('ping_interval', 60)
self.presence_interval = self.config.get('presence_interval', 60)
def setup_transport(self):
log.msg("Starting XMPPTransport: %s" % self.transport_name)
self.jid = JID(self.username)
self.xmpp_client = self._xmpp_client(self.jid, self.password,
self.host, self.port)
self.xmpp_client.logTraffic = self.debug
self.xmpp_client.setServiceParent(self)
self.presence = TransportPresenceClientProtocol(self.announce_presence)
self.presence.setHandlerParent(self.xmpp_client)
self.pinger = PingClientProtocol()
self.pinger.setHandlerParent(self.xmpp_client)
self.ping_call.start(self.ping_interval, now=False)
roster = TransportRosterClientProtocol()
roster.setHandlerParent(self.xmpp_client)
self.xmpp_protocol = self._xmpp_protocol(
self.jid, self.publish_message, self.message_consumer.unpause)
self.xmpp_protocol.setHandlerParent(self.xmpp_client)
log.msg("XMPPTransport %s started." % self.transport_name)
def announce_presence(self):
if not self.presence_call.running:
self.presence_call.start(self.presence_interval)
@inlineCallbacks
def send_ping(self):
if self.xmpp_client.xmlstream:
yield self.pinger.ping(self.jid)
def send_presence(self):
if self.xmpp_client.xmlstream:
self.presence.available(statuses={
None: self.status})
def teardown_transport(self):
log.msg("XMPPTransport %s stopped." % self.transport_name)
ping_call = getattr(self, 'ping_call', None)
if ping_call and ping_call.running:
ping_call.stop()
presence_call = getattr(self, 'presence_call', None)
if presence_call and presence_call.running:
presence_call.stop()
def handle_outbound_message(self, message):
recipient = message['to_addr']
text = message['content']
jid = JID(recipient).userhost()
if not self.xmpp_protocol.xmlstream:
log.err("Outbound undeliverable, XMPP not initialized yet.")
return False
else:
self.xmpp_protocol.reply(jid, text)
| vumi/transports/xmpp/xmpp.py | 7,769 | A custom presence protocol to automatically accept any subscription
attempt.
XMPP transport.
Configuration parameters:
:type host: str
:param host:
The host of the XMPP server to connect to.
:type port: int
:param port:
The port on the XMPP host to connect to.
:type debug: bool
:param debug:
Whether or not to show all the XMPP traffic. Defaults to False.
:type username: str
:param username:
The XMPP account username
:type password: str
:param password:
The XMPP account password
:type status: str
:param status:
The XMPP status 'away', 'xa', 'chat' or 'dnd'
:type status_message: str
:param status_message:
The natural language status message for this XMPP transport.
:type presence_interval: int
:param presence_interval:
How often (in seconds) to send a presence update to the roster.
:type ping_interval: int
:param ping_interval:
How often (in seconds) to send a keep-alive ping to the XMPP server
to keep the connection alive. Defaults to 60 seconds.
Messages sent to the bot will arrive here. Command handling routing
is done in this function.
-*- test-case-name: vumi.transports.xmpp.tests.test_xmpp -*- -*- encoding: utf-8 -*- get the roster as soon as the connection's been initialized, this allows us to see who's online but more importantly, allows us to see who's added us to their roster. This allows us to auto subscribe to anyone, automatically adding them to our roster, skips the "user ... wants to add you to their roster, allow? yes/no" hoopla. intentionally leaving from blank, leaving for XMPP server to figure out | 1,583 | en | 0.7622 |
# /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module is used for the GUI of Lisa
"""
from loguru import logger
import sys
import click
from pathlib import Path
import ast
from . import app_tools
# print("start")
# from . import image
# print("start 5")
# print("start 6")
# from scaffan import algorithm
from . import algorithm
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# print("Running __main__.py")
# @batch_detect.command(context_settings=CONTEXT_SETTINGS)
# @click.argument("image_stack_dir", type=click.Path(exists=True))
# @click.argument("working_dir", type=click.Path())
# @click.option("--create-icon", is_flag=True,
# help="Create desktop icon"
# )
@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True)
@click.pass_context
def run(ctx, *args, **kwargs):
if ctx.invoked_subcommand is None:
# click.echo('I was invoked without subcommand')
ctx.invoke(gui, *args, **kwargs)
# a.main()
else:
pass
click.echo("I am about to invoke %s" % ctx.invoked_subcommand)
pass
# @run.command(context_settings=CONTEXT_SETTINGS, help="Set persistent values")
# @click.option("--common-spreadsheet-file", help="Set path for common spreadsheet file.", type=click.Path())
# def set(common_spreadsheet_file=None):
# mainapp = algorithm.AnimalWatch()
# if common_spreadsheet_file is not None:
# mainapp.set_common_spreadsheet_file(path=common_spreadsheet_file)
# logger.info(f"Common spreadsheet file path is : {common_spreadsheet_file}")
# print(f"Common spreadsheet file path is : {common_spreadsheet_file}")
# def print_params(params):
# algorithm.Scaffan().parameters.
# params.
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
    help='Set parameter. First argument is the path to the parameter, separated by ";". '
    'Second is the value. Example: python -m scaffan gui -p "Processing;Show" True',
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def gui(params, print_params):
mainapp = algorithm.AnimalWatch()
if print_params:
make_print_params(mainapp)
exit()
# mainapp.parameters.param(*param[0].split(";")).setValue(ast.literal_eval(param[1]))
set_params(mainapp, params)
mainapp.start_gui()
def set_params(mainapp, params):
if params is not None:
logger.debug("set_params() ...")
app_tools.set_parameters_by_path(mainapp.parameters, params)
# for param in params:
# mainapp.set_parameter(param[0], value=ast.literal_eval(param[1]))
def make_print_params(mainapp):
import pprint
pprint.pprint(mainapp.parameters_to_dict())
@run.command(
context_settings=CONTEXT_SETTINGS, help="Create an icon on Windows platform"
)
def install():
from .app_tools import create_icon
icon_filename = Path(__file__).parent / Path("anwa.ico")
create_icon("anwa", icon_filename, conda_env_name="anwa_app")
# print(platform.system)
# if platform.system() == "Windows":
# import pathlib
# pass
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--input-path",
"-i",
type=click.Path(exists=True),
help='Path to input directory with video files.',
)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
    help='Set parameter. First argument is the path to the parameter, separated by ";". '
    'Second is the value. Example: python -m anwa nogui -p "Processing;Show" True',
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def nogui(input_path, params, print_params):
mainapp = algorithm.AnimalWatch()
logger.debug(f"params={params})")
if print_params:
make_print_params(mainapp)
exit()
set_params(mainapp, params)
# for param in params:
# mainapp.parameters.param(*param[0].split(";")).setValue(
# ast.literal_eval(param[1])
# )
mainapp.set_input_dir(input_path)
# mainapp.start_gui()
mainapp.run()
# def install():
| anwa/main_click.py | 4,152 | Module is used for the GUI of Lisa
/usr/bin/env python -*- coding: utf-8 -*- print("start") from . import image print("start 5") print("start 6") from scaffan import algorithm print("Running __main__.py") @batch_detect.command(context_settings=CONTEXT_SETTINGS) @click.argument("image_stack_dir", type=click.Path(exists=True)) @click.argument("working_dir", type=click.Path()) @click.option("--create-icon", is_flag=True, help="Create desktop icon" ) click.echo('I was invoked without subcommand') a.main() @run.command(context_settings=CONTEXT_SETTINGS, help="Set persistent values") @click.option("--common-spreadsheet-file", help="Set path for common spreadsheet file.", type=click.Path()) def set(common_spreadsheet_file=None): mainapp = algorithm.AnimalWatch() if common_spreadsheet_file is not None: mainapp.set_common_spreadsheet_file(path=common_spreadsheet_file) logger.info(f"Common spreadsheet file path is : {common_spreadsheet_file}") print(f"Common spreadsheet file path is : {common_spreadsheet_file}") def print_params(params): algorithm.Scaffan().parameters. params. mainapp.parameters.param(*param[0].split(";")).setValue(ast.literal_eval(param[1])) for param in params: mainapp.set_parameter(param[0], value=ast.literal_eval(param[1])) print(platform.system) if platform.system() == "Windows": import pathlib pass for param in params: mainapp.parameters.param(*param[0].split(";")).setValue( ast.literal_eval(param[1]) ) mainapp.start_gui() def install(): | 1,564 | en | 0.342508 |
'''
Author: Gurkirt Singh
Start date: 2nd May 2016
Purpose of this file is to take all .mp4 videos and convert them to jpg images
'''
import numpy as np
import cv2 as cv2
import math,pickle,shutil,os
baseDir = "/mnt/sun-alpha/actnet/";
vidDir = "/mnt/earth-beta/actnet/videos/";
imgDir = "/mnt/sun-alpha/actnet/rgb-images/";
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"
#os.mkdir(imgDir)
annotFile = "../anetv13.json"
def getAnnotations():
with open(annotFile) as f:
annoData = json.load(f)
taxonomy = annoData["taxonomy"]
version = annoData["version"]
database = annoData["database"]
print len(database),version,len(taxonomy)
def getNumFrames(filename):
cap = cv2.VideoCapture(filename)
if not cap.isOpened():
print "could not open :",filename
return -1
numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
return numf
def getVidedInfo(filename):
try:
cap = cv2.VideoCapture(filename)
except cv2.error as e:
print e
return 0,0,0,0,-1
if not cap.isOpened():
print "could not open :",filename
return 0,0,0,0,-1
numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
return numf,width,height,fps,cap
def getsmallestDimto256(width,height):
if width>=height:
newH = 256
newW = int(math.ceil((float(newH)/height)*width))
else:
newW = 256
newH = int(math.ceil((float(newW)/width)*height))
return newW,newH
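# e.g. getsmallestDimto256(640, 480) returns (342, 256): the shorter side is set
# to 256 and the longer side is scaled proportionally (rounded up).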
def getframelabels(annotations,numf):
framelabels = np.ones(numf,dtype='uint16')*200;
for annot in annotations:
actionId = annot['class']
startframe = annot['sf']
endframe = annot['ef']
framelabels[startframe:endframe] = int(actionId)-1
return framelabels
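# e.g. with numf=10 and annotations=[{'class': 5, 'sf': 2, 'ef': 6}], frames
# 2..5 are labelled 4 (class id minus one) and the rest keep the background
# label 200.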
def movefiles(storageDir,framelabels,numfs):
dst = ''
for ind in range(numfs):
label = framelabels[ind]
src = storageDir+str(ind).zfill(5)+".jpg"
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
shutil.move(src,dst)
print dst ,' MOVED '
def convertVideosL():
print "this is convertVideos function with labels"
ecount = 0
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in reversed(database.keys()):
ecount+=1
if ecount>0:
videoInfo = database[videoId]
storageDir = imgDir+'v_'+videoId+"/"
print videoInfo['subset']
if not videoInfo['isnull'] and not videoInfo['subset'] == 'testing':
videoname = vidDir+'v_'+videoId+'.mp4'
if not os.path.isfile(videoname):
videoname = vidDir+'v_'+videoId+'.mkv'
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
annotations = videoInfo['annotations']
framelabels = getframelabels(annotations,numfs)
imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
if os.path.isfile(imgname):
movefiles(storageDir,framelabels,numfs)
else:
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if not os.path.isfile(dst):
numf,width,height,fps,cap = getVidedInfo(videoname)
if not cap == -1 and numf == numfs:
newW=256;newH=256;
framecount = 0;
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for ind in xrange(numf):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
retval,image = cap.read()
if not image is None:
resizedImage = cv2.resize(image,(newW,newH))
cv2.imwrite(dst,resizedImage)
else:
cv2.imwrite(dst,resizedImage)
print ' . ',
print dst , 'is created'
else:
with open('vids/'+videoId+'.txt','wb') as f:
f.write('error')
else:
print dst , 'is already there'
def convertTestVideos():
print "this is convertVideos function with labels"
ecount = 0
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in database.keys():
ecount+=1
if ecount>0:
videoInfo = database[videoId]
storageDir = imgDir+'v_'+videoId+"/"
print videoInfo['subset']
if not videoInfo['isnull'] and videoInfo['subset'] == 'testing':
videoname = vidDir+'v_'+videoId+'.mp4'
if not os.path.isfile(videoname):
videoname = vidDir+'v_'+videoId+'.mkv'
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
# annotations = videoInfo['annotations']
framelabels = np.ones(numfs,dtype='uint16')*200;
imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
if os.path.isfile(imgname):
movefiles(storageDir,framelabels,numfs)
else:
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if not os.path.isfile(dst):
numf,width,height,fps,cap = getVidedInfo(videoname)
if not cap == -1 and numf == numfs:
newW=256;newH=256;
framecount = 0;
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for ind in xrange(numf):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
retval,image = cap.read()
if not image is None:
resizedImage = cv2.resize(image,(newW,newH))
cv2.imwrite(dst,resizedImage)
else:
cv2.imwrite(dst,resizedImage)
print ' . ',
print dst , 'is created'
else:
with open('vids/'+videoId+'.txt','wb') as f:
f.write('error')
else:
print dst , 'is already there'
def convertVideos():
print "this is convertVideos function"
## vidDir = vidDirtemp
vidlist = os.listdir(vidDir)
vidlist = [vid for vid in vidlist if vid.startswith("v_")]
print "Number of sucessfully donwloaded ",len(vidlist)
vcount =0
for videname in reversed(vidlist):
vcount+=1
if vcount>0:
src = vidDir+videname
numf,width,height,fps,cap = getVidedInfo(src)
if not cap == -1:
newW=256;newH=256;
print videname, width,height,' and newer are ',newW,newH, ' fps ',fps,' numf ', numf, ' vcount ',vcount
framecount = 0;
storageDir = imgDir+videname.split('.')[0]+"/"
imgname = storageDir+str(numf-1).zfill(5)+".jpg"
if not os.path.isfile(imgname):
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for f in xrange(numf):
retval,image = cap.read()
if not image is None:
# print np.shape(retval),np.shape(image), type(image),f
resizedImage = cv2.resize(image,(newW,newH))
imgname = storageDir+str(framecount).zfill(5)+".jpg"
cv2.imwrite(imgname,resizedImage)
else:
imgname = storageDir+str(framecount).zfill(5)+".jpg"
cv2.imwrite(imgname,resizedImage)
print 'we have missing frame ',framecount
framecount+=1
print imgname
else:
with open('vids/'+videname.split('.')[0]+'.txt','wb') as f:
f.write('error')
def getframelabels4both(annotations,numf,subset):
framelabels = np.ones(numf,dtype='uint16')*200;
if subset == 'testing':
return framelabels
for annot in annotations:
actionId = annot['class']
startframe = annot['sf']
endframe = annot['ef']
framelabels[startframe:endframe] = int(actionId)-1
return framelabels
def genVideoImageLists():
subset = 'testing'
print "this is genVideoImageLists function"
ecount = 0; vcount = 0;
listname = '{}lists/{}.list'.format(baseDir,subset)
fid = open(listname,'wb')
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in database.keys():
ecount+=1
if ecount>0:
videoInfo = database[videoId]
if not videoInfo['isnull'] and videoInfo['subset'] == subset:
vcount+=1
storageDir = imgDir+'v_'+videoId+"/"
videlistName = '{}lists/{}/v_{}.list'.format(baseDir,subset,videoId)
fid.write(videlistName+'\n');
vfid = open(videlistName,'wb');
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
annotations = videoInfo['annotations']
framelabels = getframelabels4both(annotations,numfs,subset)
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if os.path.isfile(dst):
for ind in xrange(numfs):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
vfid.write(dst+'\n')
else:
raise RuntimeError('check if file exists '+dst)
def checkConverted():
print "this is checkConverted videos function"
vidlist = os.listdir(vidDir)
vidlist = [vid for vid in vidlist if vid.endswith(".mp4")]
print "Number of sucessfully donwloaded ",len(vidlist)
vcount =0
for videname in vidlist[15000:]:
src = vidDir+videname
numF = getNumFrames(src)
if numF>0:
imgname = imgDir+videname.split('.')[0]+"/"+str(numF-1).zfill(5)+".jpg"
print 'last frame is ',imgname,' vcount ',vcount
vcount+=1
dst = vidDirtemp+videname
if not os.path.isfile(imgname):
shutil.move(src,dst)
print " moved this one to ", dst
if __name__=="__main__":
# checkConverted()
# convertVideosL()
# convertTestVideos()
genVideoImageLists()
| python-scripts/convertMP4toJPG.py | 12,384 | os.mkdir(imgDir) annotations = videoInfo['annotations'] vidDir = vidDirtemp print np.shape(retval),np.shape(image), type(image),f checkConverted() convertVideosL() convertTestVideos() | 186 | en | 0.157423 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.engine import training
from tensorflow.python.platform import test
class ConversionTest(test.TestCase):
def _simple_program_ctx(self):
return converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
def test_is_whitelisted_for_graph(self):
def test_fn():
return constant_op.constant(1)
self.assertFalse(conversion.is_whitelisted_for_graph(test_fn))
self.assertTrue(conversion.is_whitelisted_for_graph(utils))
self.assertTrue(conversion.is_whitelisted_for_graph(constant_op.constant))
def test_entity_to_graph_unsupported_types(self):
with self.assertRaises(NotImplementedError):
program_ctx = self._simple_program_ctx()
conversion.entity_to_graph('dummy', program_ctx, None, None)
def test_entity_to_graph_callable(self):
b = 2
def f(a):
return a + b
program_ctx = self._simple_program_ctx()
nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
fn_node, _ = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertIs(ns['b'], b)
def test_entity_to_graph_function_with_defaults(self):
b = 2
c = 1
def f(a, d=c + 1):
return a + b + d
program_ctx = self._simple_program_ctx()
nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None)
fn_node, _ = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertEqual(
compiler.ast_to_source(fn_node.args.defaults[0]).strip(), 'None')
def test_entity_to_graph_call_tree(self):
def g(a):
return a
def f(a):
return g(a)
program_ctx = self._simple_program_ctx()
nodes, _, _ = conversion.entity_to_graph(f, program_ctx, None, None)
f_node = nodes[0]
self.assertEqual('tf__f', f_node.name)
def test_entity_to_graph_class_hierarchy(self):
class TestBase(object):
def __init__(self, x='base'):
self.x = x
def foo(self):
return self.x
def bar(self):
return self.x
class TestSubclass(TestBase):
def __init__(self, y):
super(TestSubclass, self).__init__('sub')
self.y = y
def foo(self):
return self.y
def baz(self):
return self.y
program_ctx = self._simple_program_ctx()
with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'):
conversion.entity_to_graph(TestSubclass, program_ctx, None, None)
def test_entity_to_graph_class_hierarchy_whitelisted(self):
class TestSubclass(training.Model):
def __init__(self, y):
super(TestSubclass, self).__init__()
self.built = False
def call(self, x):
return 3 * x
program_ctx = self._simple_program_ctx()
nodes, name, _ = conversion.entity_to_graph(TestSubclass, program_ctx, None,
None)
class_node = nodes[-2] # TODO(mdan): This is brittle.
self.assertEqual(name, 'TfTestSubclass')
self.assertEqual(class_node.name, 'TfTestSubclass')
def test_entity_to_graph_lambda(self):
b = 2
f = lambda x: b * x if x > 0 else -x
program_ctx = self._simple_program_ctx()
nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
fn_node, _ = nodes
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(ns['b'], b)
def test_entity_to_graph_multiple_lambdas(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda y: b * y)
program_ctx = self._simple_program_ctx()
nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
fn_node, _ = nodes
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(ns['a'], a)
def test_entity_to_graph_multiple_lambdas_ambiguous_definitions(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda x: b * x)
program_ctx = self._simple_program_ctx()
with self.assertRaises(ValueError):
conversion.entity_to_graph(f, program_ctx, None, None)
def test_entity_to_graph_lambda_code_with_garbage(self):
# pylint:disable=g-long-lambda
f = ( # intentional wrap
lambda x: (x # intentional wrap
+ 1),)[0]
# pylint:enable=g-long-lambda
program_ctx = self._simple_program_ctx()
nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None)
fn_node, _ = nodes
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
def test_entity_to_graph_nested_functions(self):
b = 2
def f(x):
def g(x):
return b * x
return g(x)
program_ctx = self._simple_program_ctx()
nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
fn_node, _ = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual(fn_node.name, 'tf__f')
self.assertEqual('tf__f', name)
self.assertIs(ns['b'], b)
def test_ag_module_cached(self):
def callee():
return range(3)
def caller(a):
return a()
program_ctx = self._simple_program_ctx()
_, _, callee_ns = conversion.entity_to_graph(callee, program_ctx, None,
None)
_, _, caller_ns = conversion.entity_to_graph(caller, program_ctx, None,
None)
self.assertTrue(callee_ns['ag__'] is caller_ns['ag__'])
if __name__ == '__main__':
test.main()
| tensorflow/python/autograph/impl/conversion_test.py | 6,931 | Tests for conversion module.
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== TODO(mdan): This is brittle. pylint:disable=g-long-lambda intentional wrap intentional wrap pylint:enable=g-long-lambda | 811 | en | 0.763059 |
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Meta-class for creating regression tests.
#
import functools
import types
import reframe.core.namespaces as namespaces
import reframe.core.parameters as parameters
import reframe.core.variables as variables
import reframe.core.hooks as hooks
import reframe.utility as utils
from reframe.core.exceptions import ReframeSyntaxError
from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression
class RegressionTestMeta(type):
class MetaNamespace(namespaces.LocalNamespace):
'''Custom namespace to control the cls attribute assignment.
Regular Python class attributes can be overridden by either
parameters or variables respecting the order of execution.
A variable or a parameter may not be declared more than once in the
same class body. Overriding a variable with a parameter or the other
way around has an undefined behavior. A variable's value may be
updated multiple times within the same class body. A parameter's
value cannot be updated more than once within the same class body.
'''
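# A hypothetical class body obeying these rules (the names are
# illustrative, not part of this module):
#
#   class MyTest(rfm.RegressionTest):
#       v = variable(int, value=1)   # declare the variable once...
#       v = 2                        # ...its value may be updated freely
#       p = parameter([1, 2, 3])     # declared once; reassignment raises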
def __setitem__(self, key, value):
if isinstance(value, variables.TestVar):
# Insert the attribute in the variable namespace
try:
self['_rfm_local_var_space'][key] = value
value.__set_name__(self, key)
except KeyError:
raise ReframeSyntaxError(
f'variable {key!r} is already declared'
) from None
# Override the regular class attribute (if present) and return
self._namespace.pop(key, None)
return
elif isinstance(value, parameters.TestParam):
# Insert the attribute in the parameter namespace
try:
self['_rfm_local_param_space'][key] = value
except KeyError:
raise ReframeSyntaxError(
f'parameter {key!r} is already declared in this class'
) from None
# Override the regular class attribute (if present) and return
self._namespace.pop(key, None)
return
elif key in self['_rfm_local_param_space']:
raise ReframeSyntaxError(
f'cannot override parameter {key!r}'
)
else:
# Insert the items manually to override the namespace clash
# check from the base namespace.
self._namespace[key] = value
# Register functions decorated with either @sanity_function or
# @performance_variables or @performance_function decorators.
if hasattr(value, '_rfm_sanity_fn'):
try:
super().__setitem__('_rfm_sanity', value)
except KeyError:
raise ReframeSyntaxError(
'the @sanity_function decorator can only be used '
'once in the class body'
) from None
elif hasattr(value, '_rfm_perf_key'):
try:
self['_rfm_perf_fns'][key] = value
except KeyError:
raise ReframeSyntaxError(
f'the performance function {key!r} has already been '
f'defined in this class'
) from None
# Register the final methods
if hasattr(value, '_rfm_final'):
self['_rfm_final_methods'].add(key)
# Register the hooks - if a value does not meet the conditions
# it will be simply ignored
self['_rfm_hook_registry'].add(value)
def __getitem__(self, key):
'''Expose and control access to the local namespaces.
Variables may only be retrieved if their value has been previously
set. Accessing a parameter in the class body is disallowed (the
actual test parameter is set during the class instantiation).
'''
try:
return super().__getitem__(key)
except KeyError as err:
try:
# Handle variable access
return self['_rfm_local_var_space'][key]
except KeyError:
# Handle parameter access
if key in self['_rfm_local_param_space']:
raise ReframeSyntaxError(
'accessing a test parameter from the class '
'body is disallowed'
) from None
else:
# As a last resort, check whether key is a variable in
# any of the base classes. If so, make its value
# available in the current class' namespace.
for b in self['_rfm_bases']:
if key in b._rfm_var_space:
# Store a deep-copy of the variable's
# value and return.
v = b._rfm_var_space[key].default_value
self._namespace[key] = v
return self._namespace[key]
# If 'key' is neither a variable nor a parameter,
# raise the exception from the base __getitem__.
raise err from None
def reset(self, key):
'''Reset an item to rerun it through the __setitem__ logic.'''
self[key] = self[key]
class WrappedFunction:
'''Descriptor to wrap a free function as a bound-method.
The free function object is wrapped by the constructor. Instances
of this class should be inserted into the namespace of the target class
with the desired name for the bound-method. Since this class is a
descriptor, the `__get__` method will return the right bound-method
when accessed from a class instance.
:meta private:
'''
__slots__ = ('fn')
def __init__(self, fn, name=None):
@functools.wraps(fn)
def _fn(*args, **kwargs):
return fn(*args, **kwargs)
self.fn = _fn
if name:
self.fn.__name__ = name
def __get__(self, obj, objtype=None):
if objtype is None:
objtype = type(obj)
self.fn.__qualname__ = '.'.join(
[objtype.__qualname__, self.fn.__name__]
)
if obj is None:
return self.fn
return types.MethodType(self.fn, obj)
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def __getattr__(self, name):
if name in self.__slots__:
return super().__getattr__(name)
else:
return getattr(self.fn, name)
def __setattr__(self, name, value):
if name in self.__slots__:
super().__setattr__(name, value)
else:
setattr(self.fn, name, value)
@classmethod
def __prepare__(metacls, name, bases, **kwargs):
namespace = super().__prepare__(name, bases, **kwargs)
# Keep reference to the bases inside the namespace
namespace['_rfm_bases'] = [
b for b in bases if hasattr(b, '_rfm_var_space')
]
# Regression test parameter space defined at the class level
local_param_space = namespaces.LocalNamespace()
namespace['_rfm_local_param_space'] = local_param_space
# Directive to insert a regression test parameter directly in the
# class body as: `P0 = parameter([0,1,2,3])`.
namespace['parameter'] = parameters.TestParam
# Regression test var space defined at the class level
local_var_space = namespaces.LocalNamespace()
namespace['_rfm_local_var_space'] = local_var_space
# Directives to add/modify a regression test variable
namespace['variable'] = variables.TestVar
namespace['required'] = variables.Undefined
# Utility decorators
namespace['_rfm_ext_bound'] = set()
def bind(fn, name=None):
'''Directive to bind a free function to a class.
See online docs for more information.
.. note::
Functions bound using this directive must be re-inspected after
the class body execution has completed. This directive attaches
the external method into the class namespace and returns the
associated instance of the :class:`WrappedFunction`. However,
this instance may be further modified by other ReFrame builtins
such as :func:`run_before`, :func:`run_after`, :func:`final` and
so on after it was added to the namespace, which would bypass
the logic implemented in the :func:`__setitem__` method from the
:class:`MetaNamespace` class. Hence, we track the items set by
this directive in the ``_rfm_ext_bound`` set, so they can be
later re-inspected.
'''
inst = metacls.WrappedFunction(fn, name)
namespace[inst.__name__] = inst
# Track the imported external functions
namespace['_rfm_ext_bound'].add(inst.__name__)
return inst
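# Hypothetical use of the directive (the free function and the test
# class are illustrative only):
#
#   def free_fn(self):
#       ...
#
#   class MyTest(rfm.RegressionTest):
#       fn = bind(free_fn, name='fn')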
def final(fn):
'''Indicate that a function is final and cannot be overridden.'''
fn._rfm_final = True
return fn
namespace['bind'] = bind
namespace['final'] = final
namespace['_rfm_final_methods'] = set()
# Hook-related functionality
def run_before(stage):
'''Decorator for attaching a test method to a given stage.
See online docs for more information.
'''
return hooks.attach_to('pre_' + stage)
def run_after(stage):
'''Decorator for attaching a test method to a given stage.
See online docs for more information.
'''
return hooks.attach_to('post_' + stage)
namespace['run_before'] = run_before
namespace['run_after'] = run_after
namespace['require_deps'] = hooks.require_deps
namespace['_rfm_hook_registry'] = hooks.HookRegistry()
# Machinery to add a sanity function
def sanity_function(fn):
'''Mark a function as the test's sanity function.
Decorated functions must be unary and they will be converted into
deferred expressions.
'''
_def_fn = deferrable(fn)
setattr(_def_fn, '_rfm_sanity_fn', True)
return _def_fn
namespace['sanity_function'] = sanity_function
namespace['deferrable'] = deferrable
# Machinery to add performance functions
def performance_function(units, *, perf_key=None):
'''Decorate a function to extract a performance variable.
The ``units`` argument indicates the units of the performance
variable to be extracted.
The ``perf_key`` optional arg will be used as the name of the
performance variable. If not provided, the function name will
be used as the performance variable name.
'''
if not isinstance(units, str):
raise TypeError('performance units must be a string')
if perf_key and not isinstance(perf_key, str):
raise TypeError("'perf_key' must be a string")
def _deco_wrapper(func):
if not utils.is_trivially_callable(func, non_def_args=1):
raise TypeError(
f'performance function {func.__name__!r} has more '
f'than one argument without a default value'
)
@functools.wraps(func)
def _perf_fn(*args, **kwargs):
return _DeferredPerformanceExpression(
func, units, *args, **kwargs
)
_perf_key = perf_key if perf_key else func.__name__
setattr(_perf_fn, '_rfm_perf_key', _perf_key)
return _perf_fn
return _deco_wrapper
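# Hypothetical use inside a test's class body (extract_secs is an
# illustrative helper, not part of this module):
#
#   @performance_function('s', perf_key='copy_time')
#   def copy_time(self):
#       return extract_secs(self.stdout)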
namespace['performance_function'] = performance_function
namespace['_rfm_perf_fns'] = namespaces.LocalNamespace()
return metacls.MetaNamespace(namespace)
def __new__(metacls, name, bases, namespace, **kwargs):
'''Remove directives from the class namespace.
It does not make sense to have some directives available after the
class was created or even at the instance level (e.g. doing
``self.parameter([1, 2, 3])`` does not make sense). So here, we
intercept those directives out of the namespace before the class is
constructed.
'''
directives = [
'parameter', 'variable', 'bind', 'run_before', 'run_after',
'require_deps', 'required', 'deferrable', 'sanity_function',
'final', 'performance_function'
]
for b in directives:
namespace.pop(b, None)
# Reset the external functions imported through the bind directive.
for item in namespace.pop('_rfm_ext_bound'):
namespace.reset(item)
return super().__new__(metacls, name, bases, dict(namespace), **kwargs)
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs)
# Create a set with the attribute names already in use.
cls._rfm_dir = set()
for base in (b for b in bases if hasattr(b, '_rfm_dir')):
cls._rfm_dir.update(base._rfm_dir)
used_attribute_names = set(cls._rfm_dir)
# Build the var space and extend the target namespace
variables.VarSpace(cls, used_attribute_names)
used_attribute_names.update(cls._rfm_var_space.vars)
# Build the parameter space
parameters.ParamSpace(cls, used_attribute_names)
# Update used names set with the local __dict__
cls._rfm_dir.update(cls.__dict__)
# Update the hook registry with the bases
for base in cls._rfm_bases:
cls._rfm_hook_registry.update(
base._rfm_hook_registry, denied_hooks=namespace
)
# Search the bases if no local sanity functions exist.
if '_rfm_sanity' not in namespace:
for base in cls._rfm_bases:
if hasattr(base, '_rfm_sanity'):
cls._rfm_sanity = getattr(base, '_rfm_sanity')
if cls._rfm_sanity.__name__ in namespace:
raise ReframeSyntaxError(
f'{cls.__qualname__!r} overrides the candidate '
f'sanity function '
f'{cls._rfm_sanity.__qualname__!r} without '
f'defining an alternative'
)
break
# Update the performance function dict with the bases.
for base in cls._rfm_bases:
for k, v in base._rfm_perf_fns.items():
if k not in namespace:
try:
cls._rfm_perf_fns[k] = v
except KeyError:
'''Performance function overridden by other class'''
# Add the final functions from its parents
cls._rfm_final_methods.update(
*(b._rfm_final_methods for b in cls._rfm_bases)
)
if getattr(cls, '_rfm_override_final', None):
return
for b in cls._rfm_bases:
for key in b._rfm_final_methods:
if key in namespace and callable(namespace[key]):
msg = (f"'{cls.__qualname__}.{key}' attempts to "
f"override final method "
f"'{b.__qualname__}.{key}'; "
f"you should use the pipeline hooks instead")
raise ReframeSyntaxError(msg)
def __call__(cls, *args, **kwargs):
'''Inject parameter and variable spaces during object construction.
When a class is instantiated, this method intercepts the arguments
associated to the parameter and variable spaces. This prevents both
:func:`__new__` and :func:`__init__` methods from ever seeing these
arguments.
The parameter and variable spaces are injected into the object after
construction and before initialization.
'''
# Intercept constructor arguments
_rfm_use_params = kwargs.pop('_rfm_use_params', False)
obj = cls.__new__(cls, *args, **kwargs)
# Insert the var & param spaces
cls._rfm_var_space.inject(obj, cls)
cls._rfm_param_space.inject(obj, cls, _rfm_use_params)
obj.__init__(*args, **kwargs)
return obj
def __getattribute__(cls, name):
'''Attribute lookup method for custom class attributes.
ReFrame test variables are descriptors injected at the class level.
If a variable descriptor has already been injected into the class,
do not return the descriptor object and return the default value
associated with that variable instead.
.. warning::
.. versionchanged:: 3.7.0
Prior versions exposed the variable descriptor object if this
was already present in the class, instead of returning the
variable's default value.
'''
try:
var_space = super().__getattribute__('_rfm_var_space')
except AttributeError:
var_space = None
# If the variable is already injected, delegate lookup to __getattr__.
if var_space and name in var_space.injected_vars:
raise AttributeError('delegate variable lookup to __getattr__')
# Default back to the base method if no special treatment required.
return super().__getattribute__(name)
def __getattr__(cls, name):
'''Backup attribute lookup method into custom namespaces.
Some ReFrame built-in types are stored under their own sub-namespaces.
This method will perform an attribute lookup on these sub-namespaces
if a call to the default :func:`__getattribute__` method fails to
retrieve the requested class attribute.
'''
try:
var_space = super().__getattribute__('_rfm_var_space')
return var_space.vars[name]
except AttributeError:
'''Catch early access attempt to the variable space.'''
except KeyError:
'''Requested name not in variable space.'''
try:
param_space = super().__getattribute__('_rfm_param_space')
return param_space.params[name]
except AttributeError:
'''Catch early access attempt to the parameter space.'''
except KeyError:
'''Requested name not in parameter space.'''
raise AttributeError(
f'class {cls.__qualname__!r} has no attribute {name!r}'
) from None
def setvar(cls, name, value):
'''Set the value of a variable.
:param name: The name of the variable.
:param value: The value of the variable.
:returns: :class:`True` if the variable was set.
A variable will *not* be set, if it does not exist or when an
attempt is made to set it with its underlying descriptor.
This happens during the variable injection time and it should be
delegated to the class' :func:`__setattr__` method.
:raises ReframeSyntaxError: If an attempt is made to override a
variable with a descriptor other than its underlying one.
'''
try:
var_space = super().__getattribute__('_rfm_var_space')
if name in var_space:
if not hasattr(value, '__get__'):
var_space[name].define(value)
return True
elif var_space[name].field is not value:
desc = '.'.join([cls.__qualname__, name])
raise ReframeSyntaxError(
f'cannot override variable descriptor {desc!r}'
)
else:
# Variable is being injected
return False
except AttributeError:
'''Catch early access attempt to the variable space.'''
return False
def __setattr__(cls, name, value):
'''Handle the special treatment required for variables and parameters.
A variable's default value can be updated when accessed as a regular
class attribute. This behavior does not apply when the assigned value
is a descriptor object. In that case, the task of setting the value is
delegated to the base :func:`__setattr__` (this is to comply with
standard Python behavior). However, since the variables are already
descriptors which are injected during class instantiation, we disallow
any attempt to override this descriptor (since it would be silently
re-overridden in any case).
Altering the value of a parameter when accessed as a class attribute
is not allowed. This would break the parameter space internals.
'''
# Try to treat `name` as variable
if cls.setvar(name, value):
return
# Try to treat `name` as a parameter
try:
# Catch attempts to override a test parameter
param_space = super().__getattribute__('_rfm_param_space')
if name in param_space.params:
raise ReframeSyntaxError(f'cannot override parameter {name!r}')
except AttributeError:
'''Catch early access attempt to the parameter space.'''
# Treat `name` as normal class attribute
super().__setattr__(name, value)
@property
def param_space(cls):
'''Make the parameter space available as read-only.'''
return cls._rfm_param_space
def is_abstract(cls):
'''Check if the class is an abstract test.
This is the case when some parameters are undefined, which results in
the length of the parameter space being 0.
:return: bool indicating whether the test has undefined parameters.
:meta private:
'''
return len(cls.param_space) == 0
| reframe/core/meta.py | 23,068 | Custom namespace to control the cls attribute assignment.
Regular Python class attributes can be overridden by either
parameters or variables respecting the order of execution.
A variable or a parameter may not be declared more than once in the
same class body. Overriding a variable with a parameter or the other
way around has an undefined behavior. A variable's value may be
updated multiple times within the same class body. A parameter's
value cannot be updated more than once within the same class body.
Descriptor to wrap a free function as a bound-method.
The free function object is wrapped by the constructor. Instances
of this class should be inserted into the namespace of the target class
with the desired name for the bound-method. Since this class is a
descriptor, the `__get__` method will return the right bound-method
when accessed from a class instance.
:meta private:
Inject parameter and variable spaces during object construction.
When a class is instantiated, this method intercepts the arguments
associated to the parameter and variable spaces. This prevents both
:func:`__new__` and :func:`__init__` methods from ever seing these
arguments.
The parameter and variable spaces are injected into the object after
construction and before initialization.
Backup attribute lookup method into custom namespaces.
Some ReFrame built-in types are stored under their own sub-namespaces.
This method will perform an attribute lookup on these sub-namespaces
if a call to the default :func:`__getattribute__` method fails to
retrieve the requested class attribute.
Attribute lookup method for custom class attributes.
ReFrame test variables are descriptors injected at the class level.
If a variable descriptor has already been injected into the class,
do not return the descriptor object and return the default value
associated with that variable instead.
.. warning::
.. versionchanged:: 3.7.0
Prior versions exposed the variable descriptor object if this
was already present in the class, instead of returning the
variable's default value.
Expose and control access to the local namespaces.
Variables may only be retrieved if their value has been previously
set. Accessing a parameter in the class body is disallowed (the
actual test parameter is set during the class instantiation).
Remove directives from the class namespace.
It does not make sense to have some directives available after the
class was created or even at the instance level (e.g. doing
``self.parameter([1, 2, 3])`` does not make sense). So here, we
intercept those directives out of the namespace before the class is
constructed.
Handle the special treatment required for variables and parameters.
A variable's default value can be updated when accessed as a regular
class attribute. This behavior does not apply when the assigned value
is a descriptor object. In that case, the task of setting the value is
delegated to the base :func:`__setattr__` (this is to comply with
standard Python behavior). However, since the variables are already
descriptors which are injected during class instantiation, we disallow
any attempt to override this descriptor (since it would be silently
re-overridden in any case).
Altering the value of a parameter when accessed as a class attribute
is not allowed. This would break the parameter space internals.
Directive to bind a free function to a class.
See online docs for more information.
.. note::
Functions bound using this directive must be re-inspected after
the class body execution has completed. This directive attaches
the external method into the class namespace and returns the
associated instance of the :class:`WrappedFunction`. However,
this instance may be further modified by other ReFrame builtins
such as :func:`run_before`, :func:`run_after`, :func:`final` and
so on after it was added to the namespace, which would bypass
the logic implemented in the :func:`__setitem__` method from the
:class:`MetaNamespace` class. Hence, we track the items set by
this directive in the ``_rfm_ext_bound`` set, so they can be
later re-inspected.
Indicate that a function is final and cannot be overridden.
Check if the class is an abstract test.
This is the case when some parameters are undefined, which results in
the length of the parameter space being 0.
:return: bool indicating whether the test has undefined parameters.
:meta private:
Make the parameter space available as read-only.
Decorate a function to extract a performance variable.
The ``units`` argument indicates the units of the performance
variable to be extracted.
The ``perf_key`` optional arg will be used as the name of the
performance variable. If not provided, the function name will
be used as the performance variable name.
Reset an item to rerun it through the __setitem__ logic.
Decorator for attaching a test method to a given stage.
See online docs for more information.
Decorator for attaching a test method to a given stage.
See online docs for more information.
Mark a function as the test's sanity function.
Decorated functions must be unary and they will be converted into
deferred expressions.
Set the value of a variable.
:param name: The name of the variable.
:param value: The value of the variable.
:returns: :class:`True` if the variable was set.
A variable will *not* be set, if it does not exist or when an
attempt is made to set it with its underlying descriptor.
This happens during the variable injection time and it should be
delegated to the class' :func:`__setattr__` method.
:raises ReframeSyntaxError: If an attempt is made to override a
variable with a descriptor other than its underlying one.
Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) ReFrame Project Developers. See the top-level LICENSE file for details. SPDX-License-Identifier: BSD-3-Clause Meta-class for creating regression tests. Insert the attribute in the variable namespace Override the regular class attribute (if present) and return Insert the attribute in the parameter namespace Override the regular class attribute (if present) and return Insert the items manually to overide the namespace clash check from the base namespace. Register functions decorated with either @sanity_function or @performance_variables or @performance_function decorators. Register the final methods Register the hooks - if a value does not meet the conditions it will be simply ignored Handle variable access Handle parameter access As the last resource, look if key is a variable in any of the base classes. If so, make its value available in the current class' namespace. Store a deep-copy of the variable's value and return. If 'key' is neither a variable nor a parameter, raise the exception from the base __getitem__. Keep reference to the bases inside the namespace Regression test parameter space defined at the class level Directive to insert a regression test parameter directly in the class body as: `P0 = parameter([0,1,2,3])`. Regression test var space defined at the class level Directives to add/modify a regression test variable Utility decorators Track the imported external functions Hook-related functionality Machinery to add a sanity function Machinery to add performance functions Reset the external functions imported through the bind directive. Create a set with the attribute names already in use. Build the var space and extend the target namespace Build the parameter space Update used names set with the local __dict__ Update the hook registry with the bases Search the bases if no local sanity functions exist. Update the performance function dict with the bases. Add the final functions from its parents Intercept constructor arguments Insert the var & param spaces If the variable is already injected, delegate lookup to __getattr__. Default back to the base method if no special treatment required. Variable is being injected Try to treat `name` as variable Try to treat `name` as a parameter Catch attempts to override a test parameter Treat `name` as normal class attribute | 8,113 | en | 0.780836 |
import time
from check_lang import check_py,check_rb,check_j,check_c,check_cpp
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import subprocess
import json
from json import JSONEncoder
from main import predict
app = Flask(__name__)
CORS(app)
@app.route("/")
def hello():
return '<form action="/check" method="POST"><input name="code" size="135"><input type="submit" value="Code Here"></form>'
@app.route("/check", methods=['POST'])
def echo():
codes = []
filename = str(int(time.time()))
dataDict = json.loads(request.data)
# print dataDict
# print "------------"
with open('code/'+filename,'w+') as outfile:
outfile.write(str(dataDict['sc']))
codes.append(int(check_c("code/"+filename)))
codes.append(int(check_cpp("code/"+filename)))
codes.append(int(check_py("code/"+filename)))
codes.append(int(check_rb("code/"+filename)))
codes.append(1)
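# codes now holds one exit status per language, in the order
# [c, cpp, py, rb, java]; java is never compile-checked here, so a
# constant failure (1) is appended as a placeholder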
print codes
zero = 0
count = 0
correct_count = 0
for code in codes:
count = count+1
if code==0:
zero = zero + 1
correct_count = count
print zero
if(zero == 1):
if(correct_count==1):
jsonString = {'cpp': 0.0, 'ruby': 0.0, 'c': 1.0, 'py': 0.0, 'java': 0.0}
return jsonify(jsonString)
elif(correct_count==2):
jsonString = {'cpp': 1.0, 'ruby': 0.0, 'c': 0.0, 'py': 0.0, 'java': 0.0}
return jsonify(jsonString)
elif(correct_count==3):
jsonString = {'cpp': 0.0, 'ruby': 0.0, 'c': 0.0, 'py': 1.0, 'java': 0.0}
return jsonify(jsonString)
elif(correct_count==4):
jsonString = {'cpp': 0.0, 'ruby': 1.0, 'c': 0.0, 'py': 0.0, 'java': 0.0}
return jsonify(jsonString)
else:
x = predict(dataDict['sc'])
print x
# return JSONEncoder().encode(x)
return jsonify({'cpp': round(x['cpp'], 2), 'ruby': round(x['ruby'], 2), 'c': round(x['c'], 2), 'py': round(x['py'], 2), 'java': round(x['java'], 2)})
# TODO: if the cpp score is 0.5 or greater, try running the snippet as C++;
# if it compiles and runs, report cpp, otherwise java
# sa = []
# score_cpp = x['cpp']
# score_ruby = x['ruby']
# score_c = x['c']
# score_py = x['py']
# score_java = x['java']
#
# sa.append(score_c)
# sa.append(score_cpp)
# sa.append(score_java)
# sa.append(score_py)
# sa.append(score_ruby)
#
# print sa
# return ''.join([str(code) for code in codes])+" "+str(x)
if __name__ == "__main__":
app.run(host= '0.0.0.0')
| server/home.py | 2,648 | print dataDict print "------------" return JSONEncoder().encode(x)if score of cpp is eqgreater than 0.5 then run it to check if it runs then cpp else java sa = [] score_cpp = x['cpp'] score_ruby = x['ruby'] score_c = x['c'] score_py = x['py'] score_java = x['java'] sa.append(score_c) sa.append(score_cpp) sa.append(score_java) sa.append(score_py) sa.append(score_ruby) print sa return ''.join([str(code) for code in codes])+" "+str(x) | 435 | en | 0.321013 |
import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class InternalEnergyEquation(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, fext, intc, tke_diss, data_prefix):
super(InternalEnergyEquation, self).__init__(ig)
# load data to structured array
eht = self.customLoad(filename)
# load grid
xzn0 = self.getRAdata(eht, 'xzn0')
nx = self.getRAdata(eht, 'nx')
# pick equation-specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd = self.getRAdata(eht, 'dd')[intc]
ux = self.getRAdata(eht, 'ux')[intc]
pp = self.getRAdata(eht, 'pp')[intc]
ddux = self.getRAdata(eht, 'ddux')[intc]
ddei = self.getRAdata(eht, 'ddei')[intc]
ddeiux = self.getRAdata(eht, 'ddeiux')[intc]
divu = self.getRAdata(eht, 'divu')[intc]
ppdivu = self.getRAdata(eht, 'ppdivu')[intc]
ddenuc1 = self.getRAdata(eht, 'ddenuc1')[intc]
ddenuc2 = self.getRAdata(eht, 'ddenuc2')[intc]
# store time series for time derivatives
t_timec = self.getRAdata(eht, 'timec')
t_dd = self.getRAdata(eht, 'dd')
t_ddei = self.getRAdata(eht, 'ddei')
t_fht_ei = t_ddei / t_dd
# construct equation-specific mean fields
fht_ux = ddux / dd
fht_ei = ddei / dd
fei = ddeiux - ddux * ddei / dd
##########################
# INTERNAL ENERGY EQUATION
##########################
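# Schematically (tildes: Favre averages, bars: Reynolds averages):
#
#   d_t(rho*ei~) + Div(rho*ux~*ei~) =
#       -Div(f_I) - Div(f_T) - P_bar*d_bar - W_P + rho*enuc~ + eps_k
#
# with f_I the turbulent internal-energy flux, f_T the heat flux (not
# included here), W_P the pressure-dilatation term and eps_k the
# dissipated turbulent kinetic energy; the terms below mirror this
# equation one-to-one (cf. Mocak et al. 2014, arXiv:1401.5176).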
# LHS -dq/dt
self.minus_dt_dd_fht_ei = -self.dt(t_dd * t_fht_ei, xzn0, t_timec, intc)
# LHS -div dd fht_ux fht_ei
self.minus_div_dd_fht_ux_fht_ei = -self.Div(dd * fht_ux * fht_ei, xzn0)
# RHS -div fei
self.minus_div_fei = -self.Div(fei, xzn0)
# RHS -div ftt, heat flux (not included)
self.minus_div_ftt = -np.zeros(nx)
# RHS -P d = - pp Div ux
self.minus_pp_div_ux = -pp * self.Div(ux, xzn0)
# RHS -Wp = -eht_ppf_df
self.minus_eht_ppf_df = -(ppdivu - pp * divu)
# RHS source + dd enuc
self.plus_dd_fht_enuc = ddenuc1 + ddenuc2
# RHS dissipated turbulent kinetic energy
self.plus_disstke = +tke_diss
# -res
self.minus_resEiEquation = -(self.minus_dt_dd_fht_ei + self.minus_div_dd_fht_ux_fht_ei +
self.minus_div_fei + self.minus_div_ftt + self.minus_pp_div_ux + self.minus_eht_ppf_df +
self.plus_dd_fht_enuc + self.plus_disstke)
##############################
# END INTERNAL ENERGY EQUATION
##############################
# assign global data to be shared across whole class
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.fht_ei = fht_ei
self.fext = fext
def plot_ei(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
"""Plot mean Favrian internal energy stratification in the model"""
if self.ig != 1 and self.ig != 2:
print("ERROR(InternalEnergyEquation.py):" + self.errorGeometry(self.ig))
sys.exit()
# load x GRID
grd1 = self.xzn0
# load DATA to plot
plt1 = self.fht_ei
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [plt1]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title(r'internal energy')
plt.plot(grd1, plt1, color='brown', label=r'$\widetilde{\varepsilon}_I$')
# convective boundary markers
plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.eps')
def plot_ei_equation(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
"""Plot internal energy equation in the model"""
if self.ig != 1 and self.ig != 2:
print("ERROR(InternalEnergyEquation.py):" + self.errorGeometry(self.ig))
sys.exit()
# load x GRID
grd1 = self.xzn0
lhs0 = self.minus_dt_dd_fht_ei
lhs1 = self.minus_div_dd_fht_ux_fht_ei
rhs0 = self.minus_div_fei
rhs1 = self.minus_div_ftt
rhs2 = self.minus_pp_div_ux
rhs3 = self.minus_eht_ppf_df
rhs4 = self.plus_dd_fht_enuc
rhs5 = self.plus_disstke
res = self.minus_resEiEquation
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [lhs0, lhs1, rhs0, rhs1, rhs2, rhs3, rhs4, rhs5, res]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('internal energy equation')
if self.ig == 1:
plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_x (\overline{\rho}\widetilde{u}_x \widetilde{\epsilon}_I$)")
plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_x f_I $")
plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_x f_T$ (not incl.)")
plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
elif self.ig == 2:
plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_r (\overline{\rho}\widetilde{u}_r \widetilde{\epsilon}_I$)")
plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_r f_I $")
plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_r f_T$ (not incl.)")
plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
# convective boundary markers
plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"erg cm$^{-3}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"erg cm$^{-3}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 10}, ncol=2)
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.eps')
| EQUATIONS/InternalEnergyEquation.py | 8,813 | Plot mean Favrian internal energy stratification in the model
Plot internal energy equation in the model
Theoretical background https://arxiv.org/abs/1401.5176 Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field Equations in Spherical Geometry and their Application to Turbulent Stellar Convection Data load data to structured array load grid pick equation-specific Reynolds-averaged mean fields according to: https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf store time series for time derivatives construct equation-specific mean fields INTERNAL ENERGY EQUATION LHS -dq/dt LHS -div dd fht_ux fht_ei RHS -div fei RHS -div ftt (not included) heat flux RHS -P d = - pp Div ux RHS -Wp = -eht_ppf_df RHS source + dd enuc RHS dissipated turbulent kinetic energy -res END INTERNAL ENERGY EQUATION assign global data to be shared across whole class load x GRID load DATA to plot create FIGURE format AXIS, make sure it is exponential set plot boundaries plot DATA convective boundary markers define and show x/y LABELS show LEGEND display PLOT save PLOT load x GRID create FIGURE format AXIS, make sure it is exponential set plot boundaries plot DATA convective boundary markers define and show x/y LABELS show LEGEND display PLOT save PLOT | 1,313 | en | 0.715425 |
#-----------------------------------------------------------------------------
# Copyright (c) 2015-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# netCDF4 (tested with v.1.1.9) has some hidden imports
hiddenimports = ['netCDF4.utils', 'netcdftime']
| venv/Lib/site-packages/PyInstaller/hooks/hook-netCDF4.py | 514 | ----------------------------------------------------------------------------- Copyright (c) 2015-2017, PyInstaller Development Team. Distributed under the terms of the GNU General Public License with exception for distributing bootloader. The full license is in the file COPYING.txt, distributed with this software.----------------------------------------------------------------------------- netCDF4 (tested with v.1.1.9) has some hidden imports | 446 | en | 0.677862 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developers.
# =============================================================================
# @file test_fitting_efficiency.py
# Test module for ostap/fitting/efficiency.py
# =============================================================================
""" Test module for ostap/fitting/efficiency.py
"""
# =============================================================================
__author__ = "Ostap developers"
__all__ = () ## nothing to import
# =============================================================================
import ROOT, random, math
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import cpp, VE, dsID, Ostap
from ostap.logger.utils import rooSilent
from ostap.fitting.efficiency import Efficiency1D
from ostap.utils.timing import timing
from ostap.plotting.canvas import use_canvas
from ostap.utils.utils import wait
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__ :
logger = getLogger ( 'test_fitting_efficiency' )
else :
logger = getLogger ( __name__ )
# =============================================================================
## make the test observable, accept/reject category and toy dataset
x = ROOT.RooRealVar ( 'x', 'test' , 0 , 10 )
xmin , xmax = x.minmax()
acc = ROOT.RooCategory( 'cut','cut')
acc.defineType('accept',1)
acc.defineType('reject',0)
varset = ROOT.RooArgSet ( x , acc )
ds = ROOT.RooDataSet ( dsID() , 'test data' , varset )
eff0 = Models.Monotonic_pdf ( 'E0' , xvar = x , power = 3 , increasing = True )
eff0.phis = 3.1415/1 , 3.1415/2 , 3.1415/3
margin = 1.25
emax = margin * eff0 ( x.getMax() )
N = 20000
for i in range ( N ) :
xv = random.uniform ( xmin , xmax )
x.setVal ( xv )
ev = random.uniform ( 0 , emax )
if eff0( xv ) > ev : acc.setIndex(1)
else : acc.setIndex(0)
ds.add ( varset )
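# (acceptance-rejection sampling: a draw at x is accepted with
#  probability eff0(x)/emax, so the generated efficiency equals
#  eff0/emax by construction)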
np = 20
dx = (xmax-xmin)/np
points = [ dx * i for i in range ( np + 1 ) ]
# =================================================================================
## make comparison table
def make_table ( func , title , prefix = "# ") :
rows = [ ( 'x' , 'fitted eff [%]' , 'true eff [%]' , 'delta [%]' ) ]
for p in points :
e1 = 100 * func ( p , error = True )
e2 = 100 * eff0 ( p ) / emax
d = e1 - e2
row = "%4.2f" % p , \
"%s" % e1.toString ( '(%5.2f+-%4.2f)' ) ,\
"%.2f" % e2 ,\
"%s" % d .toString ( '(%5.2f+-%4.2f)' )
rows.append ( row )
from ostap.logger.table import table
return table ( rows , title = title , prefix = prefix )
# =============================================================================
# use some PDF to parameterize the efficiency
def test_pdf () :
logger = getLogger ( 'test_pdf' )
effPdf = Models.Monotonic_pdf ( 'P6' , xvar = x , power = 4 , increasing = True )
maxe = margin * effPdf ( xmax )
s0 = min ( 1.0 / emax , 1.0 / maxe )
scale = ROOT.RooRealVar ( 'scaleX' , 'scaleX' , s0 , 0.2 * s0 , 5.0 * s0 )
eff2 = Efficiency1D ( 'E2' , effPdf , cut = acc , scale = scale )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Monotonic_pdf \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Monotonic_pdf)\n%s" % make_table (
eff2 , title = 'using Monotonic_pdf') )
with wait ( 2 ) , use_canvas ( 'test_pdf' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize the efficiency
def test_vars1 () :
from ostap.fitting.roofuncs import BernsteinPoly as BP
logger = getLogger ( 'test_vars1' )
f = BP ( 'G' , xvar = x , power = 4 )
f.pars = 0.2 , 0.2 , 0.2 , 0.2
eff2 = Efficiency1D ( 'E3' , f.fun , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-BernsteinPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using BernsteinPoly)\n%s" % make_table (
eff2 , title = 'using BernsteinPoly') )
with wait ( 2 ) , use_canvas ( 'test_vars1' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize the efficiency
def test_vars2 () :
logger = getLogger ( 'test_vars2' )
from ostap.fitting.roofuncs import MonotonicPoly as MP
f = MP ( 'G' , xvar = x , increasing = True , power = 4 )
f.pars = 0.6 , 0.8 , -0.1 , -0.6
f.a = 0.06
f.b = 2.72
f.a.release ()
f.b.release ()
eff2 = Efficiency1D ( 'E4' , f , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-MonotonicPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using MonotonicPoly)\n%s" % make_table (
eff2 , title = 'using MonotonicPoly') )
with wait ( 2 ) , use_canvas ( 'test_vars2' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize the efficiency
def test_vars3 () :
logger = getLogger ( 'test_vars3' )
a = ROOT.RooRealVar ( 'A', 'a' , 0.05 , 0 , 1 )
b = ROOT.RooRealVar ( 'B', 'b' , 0.02 , -0.05 , 0.1 )
c = ROOT.RooRealVar ( 'C', 'c' , 0.005 , 0 , 0.1 )
import ostap.fitting.roofuncs as R
from ostap.fitting.funbasic import Fun1D
X = Fun1D ( x , xvar = x , name = 'X' )
##F = (X**2) * c + X * b + a
F = a + b * X + c * X**2
eff2 = Efficiency1D ( 'E5' , F , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Fun1D \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Fun1D)\n%s" % make_table (
eff2 , title = 'using Fnu1D') )
with wait ( 2 ) , use_canvas ( 'test_vars3' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
if '__main__' == __name__ :
with timing ("PDF" , logger ) :
test_pdf ()
with timing ("Vars1" , logger ) :
test_vars1 ()
with timing ("Vars2" , logger ) :
test_vars2 ()
with timing ("Vars3" , logger ) :
test_vars3 ()
# =============================================================================
## The END
# =============================================================================
| ostap/fitting/tests/test_fitting_efficiency.py | 7,278 | Test module for ostap/fitting/efficiency.py
!/usr/bin/env python -*- coding: utf-8 -*- ============================================================================= Copyright (c) Ostap developers. ============================================================================= @file test_fitting_efficiency.py Test module for ostap/fitting/efficiency.py ============================================================================= ============================================================================= nothing to import ============================================================================= ============================================================================= logging ============================================================================= ============================================================================= make ================================================================================= make comparison table ============================================================================= use some PDF to parameterize efficiciency ============================================================================= use some functions to parameterize efficiciency ============================================================================= use some functions to parameterize efficiciency ============================================================================= use some functions to parameterize efficiciencyF = (X**2) * c + X * b + a ============================================================================= ============================================================================= The END ============================================================================= | 1,803 | en | 0.366142 |
#
# @file TestConstraint_newSetters.py
# @brief Constraint unit tests for new set function API
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id$
# $HeadURL$
#
# This test file was converted from src/sbml/test/TestConstraint_newSetters.c
# with the help of conversion sciprt (ctest_converter.pl).
#
#<!---------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2009 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
#--------------------------------------------------------------------------->*/
import sys
import unittest
import libsbml
class TestConstraint_newSetters(unittest.TestCase):
C = None
def setUp(self):
self.C = libsbml.Constraint(2,4)
if (self.C == None):
pass
pass
def tearDown(self):
self.C = None
pass
def test_Constraint_setMath1(self):
math = libsbml.parseFormula("2 * k")
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() != math )
self.assertEqual( True, self.C.isSetMath() )
i = self.C.setMath(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() == None )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMath2(self):
math = libsbml.ASTNode(libsbml.AST_TIMES)
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMessage1(self):
node = libsbml.XMLNode()
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assert_( self.C.isSetMessage() == False )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def test_Constraint_setMessage2(self):
text = libsbml.XMLNode.convertStringToXMLNode(" Some text ",None)
triple = libsbml.XMLTriple("p", "http://www.w3.org/1999/xhtml", "")
att = libsbml.XMLAttributes()
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.w3.org/1999/xhtml", "")
p = libsbml.XMLNode(triple,att,xmlns)
p.addChild(text)
triple1 = libsbml.XMLTriple("message", "", "")
att1 = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple1,att1)
node.addChild(p)
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.isSetMessage() == True )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestConstraint_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| external/sbml/bindings/python/test/sbml/TestConstraint_newSetters.py | 3,756 | @file TestConstraint_newSetters.py @brief Constraint unit tests for new set function API @author Akiya Jouraku (Python conversion) @author Sarah Keating
$Id$ $HeadURL$ This test file was converted from src/sbml/test/TestConstraint_newSetters.c with the help of conversion script (ctest_converter.pl).<!--------------------------------------------------------------------------- This file is part of libSBML. Please visit http://sbml.org for more information about SBML, and the latest version of libSBML. Copyright 2005-2009 California Institute of Technology. Copyright 2002-2005 California Institute of Technology and Japan Science and Technology Corporation. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation. A copy of the license agreement is provided in the file named "LICENSE.txt" included with this software distribution and also available online as http://sbml.org/software/libsbml/license.html--------------------------------------------------------------------------->*/ | 1,138 | en | 0.763339
#!/usr/bin/env python3
import sys
import os
import argparse
scriptpath = os.path.abspath(os.path.dirname(__file__))
includepath = os.path.dirname(scriptpath)
sys.path.insert(0, includepath)
from audio.audiofilefactory import AudioFileFactory
from audio.audioconversionservice import AudioConversionService
from filesystem.filelist import FileList
parser = argparse.ArgumentParser(description="Convert music files", epilog="File types are auto-derived from the filename extensions.")
parser.add_argument("source_path", help="The source path")
parser.add_argument("destination_path", help="The destination path")
parser.add_argument("list_of_favourites", help="The list of favourites")
args = parser.parse_args()
source_path = args.source_path
destination_path = args.destination_path
list_of_favourites = args.list_of_favourites
with open(list_of_favourites) as f:
content = f.readlines()
content = [x.strip() for x in content]
factory = AudioFileFactory()
for favourite in content:
statvfs = os.statvfs(destination_path)
free_space = statvfs.f_bavail * statvfs.f_bsize
print("Space left: " + str(free_space / 1024 / 1024 / 1024) + " Gb")
if free_space < 700 * 1024 * 1024:
print("Skipping " + favourite + ", less than 700 Mb left on device (" + str(free_space / 1024 / 1024) + " Mb)")
continue
target_dir = os.path.join(destination_path, favourite)
if os.path.isdir(target_dir):
print("Skipping " + favourite + ", path already exists")
continue
os.mkdir(target_dir)
    file_list = FileList(None, factory)
    file_list.add_path_to_list(os.path.join(source_path, favourite))
    for f in file_list:
source_file_path = f.get_path()
destination_file_path = os.path.join(target_dir, os.path.splitext(os.path.basename(source_file_path))[0] + ".wav")
destination_file = factory.create_file(destination_file_path)
AudioConversionService().convert_audio_file(f, destination_file)
| bin/convertfavourites.py | 1,905 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
import sys
from pymavlink import mavutil
#import time
mavutil.set_dialect("video_monitor")
# create a connection to FMU
hoverGames = mavutil.mavlink_connection("/dev/ttymxc2", baud=921600)
# wait for the heartbeat message to find the system id
hoverGames.wait_heartbeat()
print("Heartbeat from system (system %u component %u)" %(hoverGames.target_system, hoverGames.target_component))
while True:
msg = hoverGames.recv_match(type='VIDEO_MONITOR', blocking=True)
#check that the message is valid before attempting to use it
if not msg:
print('No message!\n')
continue
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
else:
#Message is valid, so use the attribute
print('Info: %s' % msg.info)
print('Latitude : %d' % msg.lat)
print('Longitude: %d' % msg.lon)
print('No.people: %d' % msg.no_people)
print('Confidence: %f' % msg.confidence)
print('\n')
    #time.sleep(1.0)
| 02_commCustom/receiveCustomMavlinkMSG.py | 1,076 | import time create a connection to FMU wait for the heartbeat message to find the system idcheck that the message is valid before attempting to use itMessage is valid, so use the attributetime.sleep(1.0): | 205 | en | 0.653234 |
import numpy as np
from .utils import make_dir
class Evaluater(object):
def __init__(self, logger, size, original_size, tag='paper_figure'):
self.pixel_spaceing = 0.1
self.tag = tag
make_dir(tag)
self.tag += '/'
self.logger = logger
self.scale_rate_y = original_size[0] / size[0]
self.scale_rate_x = original_size[1] / size[1]
self.RE_list = list()
self.recall_radius = [2, 2.5, 3, 4] # 2mm etc
self.recall_rate = list()
self.Attack_RE_list = list()
self.Defend_RE_list = list()
self.dict_Attack = dict()
self.dict_Defend = dict()
self.total_list = dict()
self.mode_list = [0, 1, 2, 3]
self.mode_dict = {0: "Iterative FGSM", 1: "Adaptive Iterative FGSM", \
2: "Adaptive_Rate", 3: "Proposed"}
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.best_mre = 100.0
def reset(self):
self.RE_list.clear()
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.Attack_RE_list.clear()
self.Defend_RE_list.clear()
def record(self, pred, landmark):
# n = batchsize = 1
# pred : list[ c(y) ; c(x) ]
# landmark: list [ (x , y) * c]
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float) # y, x
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[:, 0], 2) + np.power(diff[:, 1], 2))
Radial_Error *= self.pixel_spaceing
self.RE_list.append(Radial_Error)
# for i in range(len(Radial_Error)):
# if Radial_Error[i] > 10:
# print("Landmark {} RE {}".format(i, Radial_Error[i]))
# if Radial_Error.max() > 10:
# return Radial_Error.argmax()
return None
def record_attack(self, pred, landmark, attack_list, mode=0, iteration=0):
# n = batchsize = 1
# pred : list[ c(y) ; c(x) ]
# landmark: list [ (x , y) * c]
assert (mode in [0, 1, 2, 3])
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float) # y, x
attack_temp = list()
defend_temp = list()
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[i, 0], 2) + np.power(diff[i, 1], 2))
if i in attack_list:
attack_temp.append([i, Radial_Error * self.pixel_spaceing])
else:
defend_temp.append([i, Radial_Error * self.pixel_spaceing])
if iteration not in self.dict_Attack[mode].keys():
self.dict_Attack[mode][iteration] = list()
self.dict_Attack[mode][iteration].append(attack_temp)
if iteration not in self.dict_Defend[mode].keys():
self.dict_Defend[mode][iteration] = list()
self.dict_Defend[mode][iteration].append(defend_temp)
def cal_metrics(self, ex=False):
# calculate MRE SDR
temp = np.array(self.RE_list)
Mean_RE_channel = temp.mean(axis=0)
self.logger.info(Mean_RE_channel)
# with open('./tmp/results.csv', 'w') as f:
# writer = csv.writer(f)
# writer.writerow(Mean_RE_channel.tolist())
mre = Mean_RE_channel.mean()
self.logger.info("ALL MRE {}".format(mre))
for radius in self.recall_radius:
total = temp.size
shot = (temp < radius).sum()
self.logger.info("ALL SDR {}mm {}".format\
(radius, shot * 100 / total))
if ex:
return mre, None
return mre
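# --- Usage sketch (illustrative; the logger, landmark count and image sizes
# below are assumptions, not part of this module) ---
#   evaluater = Evaluater(logger, size=(384, 384), original_size=(2400, 1935))
#   for pred, landmark in predictions:   # pred: [y_coords, x_coords] arrays
#       evaluater.record(pred, landmark) # landmark: [(x, y), ...] per image
#   mre = evaluater.cal_metrics()        # logs per-landmark MRE and the SDRs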
| utils/eval.py | 4,215 | 2mm etc n = batchsize = 1 pred : list[ c(y) ; c(x) ] landmark: list [ (x , y) * c] y, x for i in range(len(Radial_Error)): if Radial_Error[i] > 10: print("Landmark {} RE {}".format(i, Radial_Error[i])) if Radial_Error.max() > 10: return Radial_Error.argmax() n = batchsize = 1 pred : list[ c(y) ; c(x) ] landmark: list [ (x , y) * c] y, x calculate MRE SDR with open('./tmp/results.csv', 'w') as f: writer = csv.writer(f) writer.writerow(Mean_RE_channel.tolist()) | 487 | en | 0.347715 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from tempest.common import compute
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_request
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
tempest.test.BaseTestCase):
"""Base test case class for all Compute API tests."""
force_tenant_isolation = False
# Set this to True in subclasses to create a default network. See
# https://bugs.launchpad.net/tempest/+bug/1844568
create_default_network = False
# TODO(andreaf) We should care also for the alt_manager here
# but only once client lazy load in the manager is done
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BaseV2ComputeTest, cls).skip_checks()
if not CONF.service_available.nova:
raise cls.skipException("Nova is not available")
api_version_utils.check_skip_with_microversion(
cls.min_microversion, cls.max_microversion,
CONF.compute.min_microversion, CONF.compute.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.volume_min_microversion, cls.volume_max_microversion,
CONF.volume.min_microversion, CONF.volume.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.placement_min_microversion, cls.placement_max_microversion,
CONF.placement.min_microversion, CONF.placement.max_microversion)
@classmethod
def setup_credentials(cls):
# Setting network=True, subnet=True creates a default network
cls.set_network_resources(
network=cls.create_default_network,
subnet=cls.create_default_network)
super(BaseV2ComputeTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(BaseV2ComputeTest, cls).setup_clients()
cls.servers_client = cls.os_primary.servers_client
cls.server_groups_client = cls.os_primary.server_groups_client
cls.flavors_client = cls.os_primary.flavors_client
cls.compute_images_client = cls.os_primary.compute_images_client
cls.extensions_client = cls.os_primary.extensions_client
cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client
cls.floating_ips_client = cls.os_primary.compute_floating_ips_client
cls.keypairs_client = cls.os_primary.keypairs_client
cls.security_group_rules_client = (
cls.os_primary.compute_security_group_rules_client)
cls.security_groups_client =\
cls.os_primary.compute_security_groups_client
cls.quotas_client = cls.os_primary.quotas_client
cls.compute_networks_client = cls.os_primary.compute_networks_client
cls.limits_client = cls.os_primary.limits_client
cls.volumes_extensions_client =\
cls.os_primary.volumes_extensions_client
cls.snapshots_extensions_client =\
cls.os_primary.snapshots_extensions_client
cls.interfaces_client = cls.os_primary.interfaces_client
cls.fixed_ips_client = cls.os_primary.fixed_ips_client
cls.availability_zone_client = cls.os_primary.availability_zone_client
cls.agents_client = cls.os_primary.agents_client
cls.aggregates_client = cls.os_primary.aggregates_client
cls.services_client = cls.os_primary.services_client
cls.instance_usages_audit_log_client = (
cls.os_primary.instance_usages_audit_log_client)
cls.hypervisor_client = cls.os_primary.hypervisor_client
cls.certificates_client = cls.os_primary.certificates_client
cls.migrations_client = cls.os_primary.migrations_client
cls.security_group_default_rules_client = (
cls.os_primary.security_group_default_rules_client)
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
cls.attachments_client = cls.os_primary.attachments_client_latest
cls.snapshots_client = cls.os_primary.snapshots_client_latest
if CONF.service_available.glance:
if CONF.image_feature_enabled.api_v1:
cls.images_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.images_client = cls.os_primary.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
cls._check_depends_on_nova_network()
@classmethod
def _check_depends_on_nova_network(cls):
# Since nova-network APIs were removed from Nova in the Rocky release,
# determine, based on the max version from the version document, if
# the compute API is >Queens and if so, skip tests that rely on
# nova-network.
if not getattr(cls, 'depends_on_nova_network', False):
return
versions = cls.versions_client.list_versions()['versions']
# Find the v2.1 version which will tell us our max version for the
# compute API we're testing against.
for version in versions:
if version['id'] == 'v2.1':
max_version = api_version_request.APIVersionRequest(
version['version'])
break
else:
LOG.warning(
'Unable to determine max v2.1 compute API version: %s',
versions)
return
# The max compute API version in Queens is 2.60 so we cap
# at that version.
queens = api_version_request.APIVersionRequest('2.60')
if max_version > queens:
raise cls.skipException('nova-network is gone')
@classmethod
def resource_setup(cls):
super(BaseV2ComputeTest, cls).resource_setup()
cls.request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
cls.volume_request_microversion = (
api_version_utils.select_request_microversion(
cls.volume_min_microversion,
CONF.volume.min_microversion))
cls.placement_request_microversion = (
api_version_utils.select_request_microversion(
cls.placement_min_microversion,
CONF.placement.min_microversion))
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
cls.image_ref_alt = CONF.compute.image_ref_alt
cls.flavor_ref = CONF.compute.flavor_ref
cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
cls.ssh_user = CONF.validation.image_ssh_user
cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_user = CONF.validation.image_ssh_user
cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_password = CONF.validation.image_ssh_password
cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
@classmethod
def is_requested_microversion_compatible(cls, max_version):
"""Check the compatibility of selected request microversion
This method will check if selected request microversion
(cls.request_microversion) for test is compatible with respect
to 'max_version'. Compatible means if selected request microversion
is in the range(<=) of 'max_version'.
:param max_version: maximum microversion to compare for compatibility.
Example: '2.30'
:returns: True if selected request microversion is compatible with
'max_version'. False in other case.
"""
try:
req_version_obj = api_version_request.APIVersionRequest(
cls.request_microversion)
        # NOTE(gmann): This is the case where this method is used before
        # calling resource_setup(), where cls.request_microversion is set.
        # There may not be any such case, but we can still handle it.
except AttributeError:
request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
req_version_obj = api_version_request.APIVersionRequest(
request_microversion)
max_version_obj = api_version_request.APIVersionRequest(max_version)
return req_version_obj <= max_version_obj
@classmethod
def server_check_teardown(cls):
"""Checks is the shared server clean enough for subsequent test.
Method will delete the server when it's dirty.
The setUp method is responsible for creating a new server.
Exceptions raised in tearDown class are fails the test case,
This method supposed to use only by tearDown methods, when
the shared server_id is stored in the server_id of the class.
"""
if getattr(cls, 'server_id', None) is not None:
try:
waiters.wait_for_server_status(cls.servers_client,
cls.server_id, 'ACTIVE')
except Exception as exc:
LOG.exception(exc)
cls.servers_client.delete_server(cls.server_id)
waiters.wait_for_server_termination(cls.servers_client,
cls.server_id)
cls.server_id = None
raise
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
validation_resources=None, clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
:param validatable: Whether the server will be pingable or sshable.
:param volume_backed: Whether the instance is volume backed or not.
:param validation_resources: Dictionary of validation resources as
returned by `get_class_validation_resources`.
:param clients: Client manager, defaults to os_primary.
:param kwargs: Extra arguments are passed down to the
`compute.create_test_server` call.
"""
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server")
request_version = api_version_request.APIVersionRequest(
cls.request_microversion)
v2_37_version = api_version_request.APIVersionRequest('2.37')
tenant_network = cls.get_tenant_network()
# NOTE(snikitin): since microversion v2.37 'networks' field is required
if (request_version >= v2_37_version and 'networks' not in kwargs and
not tenant_network):
kwargs['networks'] = 'none'
if clients is None:
clients = cls.os_primary
body, servers = compute.create_test_server(
clients,
validatable,
validation_resources=validation_resources,
tenant_network=tenant_network,
volume_backed=volume_backed,
**kwargs)
# For each server schedule wait and delete, so we first delete all
# and then wait for all
for server in servers:
cls.addClassResourceCleanup(waiters.wait_for_server_termination,
clients.servers_client, server['id'])
for server in servers:
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
clients.servers_client.delete_server, server['id'])
return body
@classmethod
def create_security_group(cls, name=None, description=None):
if name is None:
name = data_utils.rand_name(cls.__name__ + "-securitygroup")
if description is None:
description = data_utils.rand_name('description')
body = cls.security_groups_client.create_security_group(
name=name, description=description)['security_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.security_groups_client.delete_security_group,
body['id'])
return body
@classmethod
def create_test_server_group(cls, name="", policy=None):
if not name:
name = data_utils.rand_name(cls.__name__ + "-Server-Group")
if policy is None:
policy = ['affinity']
body = cls.server_groups_client.create_server_group(
name=name, policies=policy)['server_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.server_groups_client.delete_server_group,
body['id'])
return body
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
while True:
try:
condition()
except Exception:
pass
else:
return
if int(time.time()) - start_time >= self.build_timeout:
condition()
return
time.sleep(self.build_interval)
@classmethod
def prepare_instance_network(cls):
if (CONF.validation.auth_method != 'disabled' and
CONF.validation.connect_method == 'floating'):
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
"""Wrapper utility that returns an image created from the server.
If compute microversion >= 2.36, the returned image response will
be from the image service API rather than the compute image proxy API.
"""
name = kwargs.pop('name',
data_utils.rand_name(cls.__name__ + "-image"))
wait_until = kwargs.pop('wait_until', None)
wait_for_server = kwargs.pop('wait_for_server', True)
image = cls.compute_images_client.create_image(server_id, name=name,
**kwargs)
if api_version_utils.compare_version_header_to_response(
"OpenStack-API-Version", "compute 2.45", image.response, "lt"):
image_id = image['image_id']
else:
image_id = data_utils.parse_image_id(image.response['location'])
# The compute image proxy APIs were deprecated in 2.35 so
# use the images client directly if the API microversion being
# used is >=2.36.
if not cls.is_requested_microversion_compatible('2.35'):
client = cls.images_client
else:
client = cls.compute_images_client
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_image, image_id)
if wait_until is not None:
try:
wait_until = wait_until.upper()
if not cls.is_requested_microversion_compatible('2.35'):
wait_until = wait_until.lower()
waiters.wait_for_image_status(client, image_id, wait_until)
except lib_exc.NotFound:
if wait_until.upper() == 'ACTIVE':
# If the image is not found after create_image returned
# that means the snapshot failed in nova-compute and nova
# deleted the image. There should be a compute fault
# recorded with the server in that case, so get the server
# and dump some details.
server = (
cls.servers_client.show_server(server_id)['server'])
if 'fault' in server:
raise exceptions.SnapshotNotFoundException(
server['fault'], image_id=image_id)
else:
raise exceptions.SnapshotNotFoundException(
image_id=image_id)
else:
raise
image = client.show_image(image_id)
# Compute image client returns response wrapped in 'image' element
# which is not the case with Glance image client.
if 'image' in image:
image = image['image']
if wait_until.upper() == 'ACTIVE':
if wait_for_server:
waiters.wait_for_server_status(cls.servers_client,
server_id, 'ACTIVE')
return image
@classmethod
def recreate_server(cls, server_id, validatable=False, **kwargs):
"""Destroy an existing class level server and creates a new one
Some test classes use a test server that can be used by multiple
tests. This is done to optimise runtime and test load.
If something goes wrong with the test server, it can be rebuilt
using this helper.
This helper can also be used for the initial provisioning if no
server_id is specified.
:param server_id: UUID of the server to be rebuilt. If None is
specified, a new server is provisioned.
        :param validatable: whether the server needs to be
            validatable. When True, validation resources are acquired via
the `get_class_validation_resources` helper.
        :param kwargs: extra parameters are passed through to the
`create_test_server` call.
:return: the UUID of the created server.
"""
if server_id:
cls.delete_server(server_id)
cls.password = data_utils.rand_password()
server = cls.create_test_server(
validatable,
validation_resources=cls.get_class_validation_resources(
cls.os_primary),
wait_until='ACTIVE',
adminPass=cls.password,
**kwargs)
return server['id']
@classmethod
def delete_server(cls, server_id):
"""Deletes an existing server and waits for it to be gone."""
try:
cls.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(cls.servers_client,
server_id)
except Exception:
LOG.exception('Failed to delete server %s', server_id)
def resize_server(self, server_id, new_flavor_id, **kwargs):
"""resize and confirm_resize an server, waits for it to be ACTIVE."""
self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
waiters.wait_for_server_status(self.servers_client, server_id,
'VERIFY_RESIZE')
self.servers_client.confirm_resize_server(server_id)
waiters.wait_for_server_status(
self.servers_client, server_id, 'ACTIVE')
server = self.servers_client.show_server(server_id)['server']
self.assert_flavor_equal(new_flavor_id, server['flavor'])
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
try:
cls.volumes_client.delete_volume(volume_id)
# TODO(mriedem): We should move the wait_for_resource_deletion
# into the delete_volume method as a convenience to the caller.
cls.volumes_client.wait_for_resource_deletion(volume_id)
except lib_exc.NotFound:
LOG.warning("Unable to delete volume '%s' since it was not found. "
"Maybe it was already deleted?", volume_id)
@classmethod
def get_server_ip(cls, server, validation_resources=None):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
:param server: The server dict as returned by the API
:param validation_resources: The dict of validation resources
provisioned for the server.
"""
if CONF.validation.connect_method == 'floating':
if validation_resources:
return validation_resources['floating_ip']['ip']
else:
msg = ('When validation.connect_method equals floating, '
'validation_resources cannot be None')
raise lib_exc.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
if address['version'] == CONF.validation.ip_version_for_ssh:
return address['addr']
raise exceptions.ServerUnreachable(server_id=server['id'])
else:
raise lib_exc.InvalidConfiguration()
def setUp(self):
super(BaseV2ComputeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
compute_microversion=self.request_microversion,
volume_microversion=self.volume_request_microversion,
placement_microversion=self.placement_request_microversion))
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
"""Create a volume and wait for it to become 'available'.
:param image_ref: Specify an image id to create a bootable volume.
:param kwargs: other parameters to create volume.
:returns: The available volume.
"""
if 'size' not in kwargs:
kwargs['size'] = CONF.volume.volume_size
if 'display_name' not in kwargs:
vol_name = data_utils.rand_name(cls.__name__ + '-volume')
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
if CONF.compute.compute_volume_common_az:
kwargs.setdefault('availability_zone',
CONF.compute.compute_volume_common_az)
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(
cls.volumes_client.wait_for_resource_deletion, volume['id'])
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.volumes_client.delete_volume,
volume['id'])
waiters.wait_for_volume_resource_status(cls.volumes_client,
volume['id'], 'available')
return volume
def _detach_volume(self, server, volume):
"""Helper method to detach a volume.
Ignores 404 responses if the volume or server do not exist, or the
volume is already detached from the server.
"""
try:
volume = self.volumes_client.show_volume(volume['id'])['volume']
# Check the status. You can only detach an in-use volume, otherwise
# the compute API will return a 400 response.
if volume['status'] == 'in-use':
self.servers_client.detach_volume(server['id'], volume['id'])
except lib_exc.NotFound:
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
pass
def attach_volume(self, server, volume, device=None, tag=None):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
:param server: The server to which the volume will be attached.
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
:param tag: Optional device role tag to apply to the volume.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
if tag:
attach_kwargs['tag'] = tag
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and for multiattach volumes wait for
# the attachment to be removed. For non-multiattach volumes wait for
# the state of the volume to change to available. This is so we don't
# error out when trying to delete the volume during teardown.
if volume['multiattach']:
att = waiters.wait_for_volume_attachment_create(
self.volumes_client, volume['id'], server['id'])
self.addCleanup(waiters.wait_for_volume_attachment_remove,
self.volumes_client, volume['id'],
att['attachment_id'])
else:
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client, volume['id'], 'available')
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
self.addCleanup(self._detach_volume, server, volume)
return attachment
def create_volume_snapshot(self, volume_id, name=None, description=None,
metadata=None, force=False):
name = name or data_utils.rand_name(
self.__class__.__name__ + '-snapshot')
snapshot = self.snapshots_client.create_snapshot(
volume_id=volume_id,
force=force,
display_name=name,
description=description,
metadata=metadata)['snapshot']
self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
snapshot['id'])
self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
waiters.wait_for_volume_resource_status(self.snapshots_client,
snapshot['id'], 'available')
snapshot = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
return snapshot
def assert_flavor_equal(self, flavor_id, server_flavor):
"""Check whether server_flavor equals to flavor.
:param flavor_id: flavor id
:param server_flavor: flavor info returned by show_server.
"""
# Nova API > 2.46 no longer includes flavor.id, and schema check
# will cover whether 'id' should be in flavor
if server_flavor.get('id'):
            msg = ('server flavor is not the same as flavor!')
self.assertEqual(flavor_id, server_flavor['id'], msg)
else:
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
self.assertEqual(flavor['name'], server_flavor['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
                msg = ('attribute %s in server flavor is not the same as '
                       'flavor!' % key)
self.assertEqual(flavor[key], server_flavor[key], msg)
class BaseV2ComputeAdminTest(BaseV2ComputeTest):
"""Base test case class for Compute Admin API tests."""
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseV2ComputeAdminTest, cls).setup_clients()
cls.availability_zone_admin_client = (
cls.os_admin.availability_zone_client)
cls.admin_flavors_client = cls.os_admin.flavors_client
cls.admin_servers_client = cls.os_admin.servers_client
cls.image_client = cls.os_admin.image_client_v2
cls.admin_assisted_volume_snapshots_client = \
cls.os_admin.assisted_volume_snapshots_client
def create_flavor(self, ram, vcpus, disk, name=None,
is_public='True', **kwargs):
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-flavor")
id = kwargs.pop('id', data_utils.rand_int_id(start=1000))
client = self.admin_flavors_client
flavor = client.create_flavor(
ram=ram, vcpus=vcpus, disk=disk, name=name,
id=id, is_public=is_public, **kwargs)['flavor']
self.addCleanup(client.wait_for_resource_deletion, flavor['id'])
self.addCleanup(client.delete_flavor, flavor['id'])
return flavor
@classmethod
def get_host_for_server(cls, server_id):
server_details = cls.admin_servers_client.show_server(server_id)
return server_details['server']['OS-EXT-SRV-ATTR:host']
def get_host_other_than(self, server_id):
source_host = self.get_host_for_server(server_id)
svcs = self.os_admin.services_client.list_services(
binary='nova-compute')['services']
hosts = []
for svc in svcs:
if svc['state'] == 'up' and svc['status'] == 'enabled':
if CONF.compute.compute_volume_common_az:
if svc['zone'] == CONF.compute.compute_volume_common_az:
hosts.append(svc['host'])
else:
hosts.append(svc['host'])
for target_host in hosts:
if source_host != target_host:
return target_host
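# --- Illustrative subclass (a sketch, not part of tempest itself) ---
# A minimal test built on the base class above could look like:
#
#   class ServerDemoTest(BaseV2ComputeTest):
#       @classmethod
#       def resource_setup(cls):
#           super(ServerDemoTest, cls).resource_setup()
#           cls.server = cls.create_test_server(wait_until='ACTIVE')
#
#       def test_show_server(self):
#           server = self.servers_client.show_server(
#               self.server['id'])['server']
#           self.assertEqual(self.server['id'], server['id'])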
| tempest/api/compute/base.py | 30,988 | Base test case class for Compute Admin API tests.
Base test case class for all Compute API tests.
Helper method to detach a volume.
Ignores 404 responses if the volume or server do not exist, or the
volume is already detached from the server.
Check whether server_flavor equals to flavor.
:param flavor_id: flavor id
:param server_flavor: flavor info returned by show_server.
Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
:param server: The server to which the volume will be attached.
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
:param tag: Optional device role tag to apply to the volume.
Wrapper utility that returns an image created from the server.
If compute microversion >= 2.36, the returned image response will
be from the image service API rather than the compute image proxy API.
Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
:param validatable: Whether the server will be pingable or sshable.
:param volume_backed: Whether the instance is volume backed or not.
:param validation_resources: Dictionary of validation resources as
returned by `get_class_validation_resources`.
:param clients: Client manager, defaults to os_primary.
:param kwargs: Extra arguments are passed down to the
`compute.create_test_server` call.
Create a volume and wait for it to become 'available'.
:param image_ref: Specify an image id to create a bootable volume.
:param kwargs: other parameters to create volume.
:returns: The available volume.
Deletes an existing server and waits for it to be gone.
Deletes the given volume and waits for it to be gone.
Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
:param server: The server dict as returned by the API
:param validation_resources: The dict of validation resources
provisioned for the server.
Check the compatibility of the selected request microversion.
This method checks whether the request microversion selected for
the test (cls.request_microversion) is compatible with
'max_version'. Compatible means the selected request microversion
falls within range, i.e. is less than or equal to 'max_version'.
:param max_version: maximum microversion to compare for compatibility.
Example: '2.30'
:returns: True if the selected request microversion is compatible
with 'max_version'. False otherwise.
Destroys an existing class-level server and creates a new one.
Some test classes use a test server that can be used by multiple
tests. This is done to optimise runtime and test load.
If something goes wrong with the test server, it can be rebuilt
using this helper.
This helper can also be used for the initial provisioning if no
server_id is specified.
:param server_id: UUID of the server to be rebuilt. If None is
specified, a new server is provisioned.
:param validatable: whether the server needs to be
validatable. When True, validation resources are acquired via
the `get_class_validation_resources` helper.
:param kwargs: extra parameters are passed through to the
`create_test_server` call.
:return: the UUID of the created server.
Resize and confirm_resize a server, then wait for it to be ACTIVE.
Checks whether the shared server is clean enough for subsequent tests.
The method deletes the server when it is dirty; the setUp method
is then responsible for creating a new server.
Exceptions raised in tearDown fail the test case. This method is
supposed to be used only by tearDown methods, when the shared
server_id is stored in the server_id attribute of the class.
Repeatedly calls condition() until a timeout.
Copyright 2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Set this to True in subclasses to create a default network. See https://bugs.launchpad.net/tempest/+bug/1844568 TODO(andreaf) We should care also for the alt_manager here but only once client lazy load in the manager is done Setting network=True, subnet=True creates a default network Since nova-network APIs were removed from Nova in the Rocky release, determine, based on the max version from the version document, if the compute API is >Queens and if so, skip tests that rely on nova-network. Find the v2.1 version which will tell us our max version for the compute API we're testing against. The max compute API version in Queens is 2.60 so we cap at that version. NOTE(gmann): This is case where this method is used before calling resource_setup(), where cls.request_microversion is set. There may not be any such case but still we can handle this case. NOTE(snikitin): since microversion v2.37 'networks' field is required For each server schedule wait and delete, so we first delete all and then wait for all The compute image proxy APIs were deprecated in 2.35 so use the images client directly if the API microversion being used is >=2.36. If the image is not found after create_image returned that means the snapshot failed in nova-compute and nova deleted the image. There should be a compute fault recorded with the server in that case, so get the server and dump some details. Compute image client returns response wrapped in 'image' element which is not the case with Glance image client. TODO(mriedem): We should move the wait_for_resource_deletion into the delete_volume method as a convenience to the caller. Check the status. You can only detach an in-use volume, otherwise the compute API will return a 400 response. Ignore 404s on detach in case the server is deleted or the volume is already detached. On teardown detach the volume and for multiattach volumes wait for the attachment to be removed. For non-multiattach volumes wait for the state of the volume to change to available. This is so we don't error out when trying to delete the volume during teardown. Ignore 404s on detach in case the server is deleted or the volume is already detached. Nova API > 2.46 no longer includes flavor.id, and schema check will cover whether 'id' should be in flavor | 6,835 | en | 0.842564 |
"""Script that generates a refresh token for a specific user."""
import os
import sys
import spotipy.util as util
import json
if len(sys.argv) == 2:
username = str(sys.argv[1])
else:
print('Usage: {} username'.format(sys.argv[0]))
sys.exit(1)
scope = 'user-read-currently-playing user-read-playback-state'
# Get tokens from Spotify.
try:
    util.prompt_for_user_token(username, scope)
except Exception as exc:
    raise RuntimeError('Could not fetch token.') from exc
# Print refresh token.
with open('.cache-{}'.format(username)) as json_file:
data = json.load(json_file)
print('Refresh token for {}: {}'.format(username, data['refresh_token']))
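# Note: when no credentials are passed explicitly, spotipy's
# prompt_for_user_token() reads them from the environment, so something like
# the following is expected before running (values are placeholders):
#   export SPOTIPY_CLIENT_ID=<client_id>
#   export SPOTIPY_CLIENT_SECRET=<client_secret>
#   export SPOTIPY_REDIRECT_URI=http://localhost:8888/callback
#   python spotify_setup.py <username>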
| spotify_setup.py | 648 | Script that generates a refresh token for a specific user.
Get tokens from Spotify. Print refresh token. | 106 | en | 0.692604 |
# -*- coding: utf-8 -*-
import hmac
import requests
from json import dumps
from hashlib import sha1
from .app import api, env
def match_any_if_any(event, events):
return events is None or event in events
class Subscription:
def __init__(self, data):
self.data = data
self.events = data['data'].get('events') # user defined
def __getitem__(self, config):
return self.data[config]
class Subscriptions:
store = {}
@classmethod
def add(cls, sub):
Subscriptions.store[sub['id']] = Subscription(sub)
@classmethod
def is_listening_for(cls, event):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
return True
return False
@classmethod
def publish(cls, eventid, event, data):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
requests.post(
sub['endpoint'],
headers={'Content-Type': 'application/json'},
data=dumps(dict(
eventType=event,
cloudEventsVersion='0.1',
contentType='application/vnd.omg.object+json',
eventID=eventid,
data=data
))
)
@classmethod
def remove(cls, eventid):
Subscriptions.store.pop(eventid, None)
@api.route('/webhooks/subscribe')
async def subscribe(req, resp):
data = await req.media()
Subscriptions.add(data)
resp.text = 'Subscribed'
@api.route('/webhooks/unsubscribe')
async def unsubscribe(req, resp):
data = await req.media()
Subscriptions.remove(data['id'])
resp.text = 'Unsubscribed'
@api.route('/webhooks')
async def webhooks(req, resp):
"""
Handle incoming GitHub webhooks
"""
data = await req.media()
eventid = req.headers.get('X-GitHub-Delivery')
event = req.headers.get('X-GitHub-Event')
if not Subscriptions.is_listening_for(event):
resp.text = f'Accepted, but not listening for {event} events.'
return
    if env.webhook_secret:
        signature = req.headers.get('X-Hub-Signature')
        assert signature, 'X-Hub-Signature not found in the header.'
        sha_name, signature = signature.split('=')
        assert sha_name == 'sha1'
        # hmac needs a bytes key and message: encode the secret if it is
        # stored as a string, and hash the re-serialized payload (note that
        # GitHub actually signs the raw request body).
        key = env.webhook_secret
        if isinstance(key, str):
            key = key.encode()
        mac = hmac.new(key, msg=dumps(data).encode(), digestmod='sha1')
        assert hmac.compare_digest(str(mac.hexdigest()), str(signature))
Subscriptions.publish(eventid, event, {'event': event, 'payload': data})
resp.text = 'Accepted'
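# For reference, a sender-side sketch of the signature this endpoint checks:
# a hex SHA-1 HMAC over the request body, prefixed with 'sha1=' (the names
# below are illustrative, not part of the original app).
def sign_payload(secret: bytes, body: bytes) -> str:
    """Return a GitHub-style X-Hub-Signature header value."""
    return 'sha1=' + hmac.new(secret, msg=body, digestmod=sha1).hexdigest()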
| app/webhooks.py | 2,678 | -*- coding: utf-8 -*- user defined | 34 | en | 0.835374 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack import resource
class Queue(resource.Resource):
# FIXME(anyone): The name string of `location` field of Zaqar API response
# is lower case. That is inconsistent with the guide from API-WG. This is
# a workaround for this issue.
location = resource.Header("location")
resources_key = "queues"
base_path = "/queues"
# capabilities
allow_create = True
allow_list = True
allow_fetch = True
allow_delete = True
# Properties
#: The default TTL of messages defined for a queue, which will effect for
#: any messages posted to the queue.
default_message_ttl = resource.Body("_default_message_ttl")
#: Description of the queue.
description = resource.Body("description")
#: The max post size of messages defined for a queue, which will effect
#: for any messages posted to the queue.
max_messages_post_size = resource.Body("_max_messages_post_size")
#: Name of the queue. The name is the unique identity of a queue. It
#: must not exceed 64 bytes in length, and it is limited to US-ASCII
#: letters, digits, underscores, and hyphens.
name = resource.Body("name", alternate_id=True)
#: The ID to identify the client accessing Zaqar API. Must be specified
#: in header for each API request.
client_id = resource.Header("Client-ID")
#: The ID to identify the project accessing Zaqar API. Must be specified
#: in case keystone auth is not enabled in Zaqar service.
project_id = resource.Header("X-PROJECT-ID")
def create(self, session, prepend_key=True):
request = self._prepare_request(requires_id=True,
prepend_key=prepend_key)
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.put(request.url,
json=request.body, headers=request.headers)
self._translate_response(response, has_body=False)
return self
@classmethod
def list(cls, session, paginated=False, **params):
"""This method is a generator which yields queue objects.
        This is almost a copy of the list method of the resource.Resource
        class.
The only difference is the request header now includes `Client-ID`
and `X-PROJECT-ID` fields which are required by Zaqar v2 API.
"""
more_data = True
query_params = cls._query_mapping._transpose(params)
uri = cls.base_path % params
headers = {
"Client-ID": params.get('client_id', None) or str(uuid.uuid4()),
"X-PROJECT-ID": params.get('project_id', None
) or session.get_project_id()
}
while more_data:
resp = session.get(uri,
headers=headers, params=query_params)
resp = resp.json()
resp = resp[cls.resources_key]
if not resp:
more_data = False
yielded = 0
new_marker = None
for data in resp:
value = cls.existing(**data)
new_marker = value.id
yielded += 1
yield value
if not paginated:
return
if "limit" in query_params and yielded < query_params["limit"]:
return
query_params["limit"] = yielded
query_params["marker"] = new_marker
def fetch(self, session, requires_id=True, error_message=None):
request = self._prepare_request(requires_id=requires_id)
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.get(request.url,
headers=headers)
self._translate_response(response)
return self
def delete(self, session):
request = self._prepare_request()
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.delete(request.url,
headers=headers)
self._translate_response(response, has_body=False)
return self
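# --- Usage sketch (assumes an authenticated openstacksdk Connection `conn`;
# the queue name is illustrative) ---
#   queue = conn.message.create_queue(name="demo-queue")
#   for q in conn.message.queues():
#       print(q.name)
#   conn.message.delete_queue(queue)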
| openstack/message/v2/queue.py | 5,086 | This method is a generator which yields queue objects.
This is almost a copy of the list method of the resource.Resource class.
The only difference is the request header now includes `Client-ID`
and `X-PROJECT-ID` fields which are required by Zaqar v2 API.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. FIXME(anyone): The name string of `location` field of Zaqar API response is lower case. That is inconsistent with the guide from API-WG. This is a workaround for this issue. capabilities Properties: The default TTL of messages defined for a queue, which will effect for: any messages posted to the queue.: Description of the queue.: The max post size of messages defined for a queue, which will effect: for any messages posted to the queue.: Name of the queue. The name is the unique identity of a queue. It: must not exceed 64 bytes in length, and it is limited to US-ASCII: letters, digits, underscores, and hyphens.: The ID to identify the client accessing Zaqar API. Must be specified: in header for each API request.: The ID to identify the project accessing Zaqar API. Must be specified: in case keystone auth is not enabled in Zaqar service. | 1,625 | en | 0.854858 |
''' setup module
'''
from distutils.core import setup
# TEMPLATE
setup(
name='mask-query-aide',
version='0.0',
description='python code to train ML for detecting people with masks',
long_description=open('README.rst').read(),
author='Christine Madden',
license=open('LICENSE').read(),
author_email='christine.m.madden19@gmail.com',
packages=['mask_query_aide'],
# python_requires="<3.8",
install_requires=[
"numpy==1.16.1",
"pandas",
"matplotlib",
"opencv-python<=4.1.2.30",
"keras==2.2.4",
"tensorflow<2.0",
"tensorflow-gpu<2.0",
"imageai",
"jupyterlab",
"requests",
],
entry_points={
'console_scripts':
[
'mask_query_aide = mask_query_aide.__main__:main',
]
}
)
| setup.py | 834 | setup module
TEMPLATE python_requires="<3.8", | 47 | el | 0.092476 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
__all__ = ["Config"]
class Config(object):
"""
Config: Holds configuration settings.
Parameters
----------
fitParameters : list
Parameters to fit.
parameterPriors : dict
Dictionary with parameters as keys, and a dictionary
as the value for each key. This dictionary is called
to setup the pymc3 priors for each parameter not in
fitParameters.
columnMapping : dict
This dictionary should define the
column names of the user's data relative to the
internally used names.
tableParameterLimits : dict
This is dictionary is called
when building model tables to set the grid in subsolar
temperature and phase angle. It should have 'T_ss' and
'alpha' as keys. Values should be a list:
the first element should be another list
with the lower and upper bounds, the second element
should be the step size.
    threads : int
        The number of threads to use when building model tables
        and running the multi-fit script.
    samples : int
        Number of samples to draw from the posterior distribution.
    burnInSamples : int
        Number of the drawn samples to discard from summary statistics
        and plotting.
    chains : int
        Number of Markov chains to sample the posterior distribution.
phaseAngleFluxCorrection : float
The default value to correct for phase-angle effects in the
Standard Thermal Model. The canonical value is 0.01.
verbose : bool
Print progress statements?
"""
fitParameters = ["logT1", "logD", "eps"]
parameterPriors = {
"logD": {
"lower": 1,
"upper": 8,
},
"eps": {
"lower": 0.0,
"upper": 1.0},
"logT1": {
"lower": 0.01,
"upper": 5,
},
"T_ss": {
"lower": 10,
"upper": 1200.0
},
"alpha_rad": {
"lower": 0,
"upper": np.pi
},
"r_au": {
"lower": 0,
"upper": 10
},
"delta_au": {
"lower": 0,
"upper": 10
},
"G": {
"lower": 0,
"upper": 1},
"p": {
"lower": 0,
"upper": 5
},
"eta": {
"lower": 0,
"upper": 10
}
}
columnMapping = {
"designation" : "designation",
"obs_id": "obs_id",
"exp_mjd": "mjd",
"r_au": "r_au",
"delta_au": "delta_au",
"alpha_rad": "alpha_rad",
"G": "G",
"logD": "logD",
"logT1" : "logT1",
"eta": "eta",
"eps": "eps",
"flux_si": ["flux_W1_si", "flux_W2_si", "flux_W3_si", "flux_W4_si"],
"fluxErr_si": ["fluxErr_W1_si", "fluxErr_W2_si", "fluxErr_W3_si", "fluxErr_W4_si"],
"mag" : ["mag_W1", "mag_W2", "mag_W3", "mag_W4"],
"magErr" : ["magErr_W1", "magErr_W2", "magErr_W3", "magErr_W4"]
}
tableParameterLimits = {
"T_ss": [[100.0, 1200.0], 0.5],
"alpha": [[0.0, np.pi], np.pi/360]
}
threads = 10
samples = 2500
burnInSamples = 500
chains = 20
phaseAngleFluxCorrection = 0.01
verbose = True
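# A fit can be customized by subclassing Config and overriding only the
# attributes that should change; a minimal sketch (values are illustrative):
class TwoParameterConfig(Config):
    fitParameters = ["logT1", "logD"]
    threads = 4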
| atm/config.py | 3,438 | Config: Holds configuration settings.
Parameters
----------
fitParameters : list
Parameters to fit.
parameterPriors : dict
Dictionary with parameters as keys, and a dictionary
as the value for each key. This dictionary is called
to setup the pymc3 priors for each parameter not in
fitParameters.
columnMapping : dict
This dictionary should define the
column names of the user's data relative to the
internally used names.
tableParameterLimits : dict
This is dictionary is called
when building model tables to set the grid in subsolar
temperature and phase angle. It should have 'T_ss' and
'alpha' as keys. Values should be a list:
the first element should be another list
with the lower and upper bounds, the second element
should be the step size.
threads : int
The number of threads to use when building model tables
and running the multi-fit script.
samples : int
Number of samples to draw from the posterior distribution.
burnInSamples : int
Number of the drawn samples to discard from summary statistics
and plotting.
chains : int
Number of Markov chains to sample the posterior distribution.
phaseAngleFluxCorrection : float
The default value to correct for phase-angle effects in the
Standard Thermal Model. The canonical value is 0.01.
verbose : bool
Print progress statements?
!/usr/bin/env python -*- coding: UTF-8 -*- | 1,428 | en | 0.717856 |
from attr import dataclass
#s4 corresponds to https://t.me/shuraim1/https:/
#s5 corresponds to https://t.me/alquran30juzsaadalghamidi/5
#s6 corresponds to https://t.me/bandar_abdulaziz_balilah/5
#s7 corresponds to https://t.me/Idriss_Akbar/388
#s8 corresponds to https://t.me/yasseraldosari_mp3/2
sura = {
'0': {'s1':'42', 's2':'257', 's3':'18', 's4':'3', 's5':'5', 's6':'5', 's7':'388', 's8':'2',},
'1': {'s1':'42', 's2':'257', 's3':'18', 's4':'3', 's5':'5', 's6':'5', 's7':'388', 's8':'2',},
'2': {'s1':'43', 's2':'258', 's3':'19', 's4':'4', 's5':'6', 's6':'6', 's7':'389', 's8':'3',},
'3': {'s1':'44', 's2':'259', 's3':'20', 's4':'5', 's5':'7', 's6':'7', 's7':'390', 's8':'4',},
'4': {'s1':'45', 's2':'260', 's3':'21', 's4':'6', 's5':'8', 's6':'8', 's7':'391', 's8':'5',},
'5': {'s1':'46', 's2':'261', 's3':'22', 's4':'7', 's5':'9', 's6':'9', 's7':'392', 's8':'6',},
'6': {'s1':'47', 's2':'262', 's3':'23', 's4':'8', 's5':'10', 's6':'10', 's7':'393', 's8':'7',},
'7': {'s1':'48', 's2':'263', 's3':'24', 's4':'9', 's5':'11', 's6':'11', 's7':'394', 's8':'8',},
'8': {'s1':'49', 's2':'264', 's3':'25', 's4':'10', 's5':'12', 's6':'12', 's7':'395', 's8':'9',},
'9': {'s1':'50', 's2':'265', 's3':'26', 's4':'11', 's5':'13', 's6':'13', 's7':'396', 's8':'10',},
'10': {'s1':'51', 's2':'266', 's3':'27', 's4':'12', 's5':'14', 's6':'14', 's7':'397', 's8':'11',},
'11': {'s1': '52', 's2':'267', 's3':'28', 's4':'13', 's5':'15', 's6':'15', 's7':'398', 's8':'12',},
'12': {'s1':'53', 's2':'268', 's3':'29', 's4':'14', 's5':'16', 's6':'16', 's7':'399', 's8':'13',},
'13': {'s1': '54', 's2':'269', 's3':'30', 's4':'15', 's5':'17', 's6':'17', 's7':'401', 's8':'14',},
'14': {'s1':'55', 's2':'270', 's3':'31', 's4':'16', 's5':'18', 's6':'18', 's7':'402', 's8':'15',},
'15': {'s1':'56', 's2':'271', 's3':'32', 's4':'17', 's5':'19', 's6':'19', 's7':'403', 's8':'16',},
'16': {'s1':'59', 's2':'272', 's3':'33', 's4':'18', 's5':'20', 's6':'20', 's7':'404', 's8':'17',},
'17': {'s1':'60', 's2':'273', 's3':'34', 's4':'19', 's5':'21', 's6':'21', 's7':'405', 's8':'18',},
'18' : {'s1':'61', 's2':'274', 's3':'35', 's4':'20', 's5':'22', 's6':'22', 's7':'406', 's8':'19',},
'19': {'s1':'62', 's2':'275', 's3':'36', 's4':'21', 's5':'23', 's6':'23', 's7':'407', 's8':'20',},
'20': {'s1':'63', 's2':'276', 's3':'37', 's4':'22', 's5':'24', 's6':'24', 's7':'408', 's8':'21',},
'21': {'s1':'64', 's2':'277', 's3':'38', 's4':'23', 's5':'25', 's6':'25', 's7':'409', 's8':'22',},
'22': {'s1':'65', 's2':'278', 's3':'39', 's4':'24', 's5':'26', 's6':'26', 's7':'410', 's8':'23',},
'23': {'s1':'66', 's2':'279', 's3':'40', 's4':'25', 's5':'27', 's6':'27', 's7':'411', 's8':'24',},
'24': {'s1':'67', 's2':'280', 's3':'41', 's4':'26', 's5':'28', 's6':'28', 's7':'412', 's8':'25',},
'25': {'s1':'68', 's2':'281', 's3':'42', 's4':'27', 's5':'29', 's6':'29', 's7':'413', 's8':'26',},
'26': {'s1':'69', 's2':'282', 's3':'43', 's4':'28', 's5':'30', 's6':'30', 's7':'414', 's8':'27',},
'27': {'s1':'70', 's2':'283', 's3':'44', 's4':'29', 's5':'31', 's6':'31', 's7':'415', 's8':'28',},
'28': {'s1':'71', 's2':'284', 's3':'45', 's4':'30', 's5':'32', 's6':'32', 's7':'416', 's8':'29',},
'29': {'s1':'72', 's2':'285', 's3':'46', 's4':'31', 's5':'33', 's6':'33', 's7':'417', 's8':'30',},
'30': {'s1':'73', 's2':'286', 's3':'47', 's4':'32', 's5':'34', 's6':'34', 's7':'418', 's8':'31',},
'31': {'s1':'74', 's2':'287', 's3':'48', 's4':'33', 's5':'35', 's6':'35', 's7':'419', 's8':'32',},
'32': {'s1':'75', 's2':'288', 's3':'49', 's4':'34', 's5':'36', 's6':'36', 's7':'420', 's8':'33',},
'33': {'s1':'76', 's2':'289', 's3':'50', 's4':'35', 's5':'37', 's6':'37', 's7':'421', 's8':'34',},
'34': {'s1':'77', 's2':'290', 's3':'51', 's4':'36', 's5':'38', 's6':'38', 's7':'422', 's8':'35',},
'35': {'s1':'78', 's2':'291', 's3':'52', 's4':'37', 's5':'39', 's6':'39', 's7':'423', 's8':'36',},
'36': {'s1':'79', 's2':'292', 's3':'53', 's4':'38', 's5':'40', 's6':'40', 's7':'424', 's8':'37',},
'37': {'s1':'80', 's2':'293', 's3':'54', 's4':'39', 's5':'41', 's6':'41', 's7':'425', 's8':'38',},
'38': {'s1':'81', 's2':'294', 's3':'55', 's4':'40', 's5':'42', 's6':'42', 's7':'426', 's8':'39',},
'39': {'s1':'82', 's2':'295', 's3':'56', 's4':'41', 's5':'43', 's6':'43', 's7':'427', 's8':'40',},
'40': {'s1':'83', 's2':'296', 's3':'57', 's4':'42', 's5':'44', 's6':'44', 's7':'428', 's8':'41',},
'41': {'s1':'84', 's2':'297', 's3':'58', 's4':'43', 's5':'45', 's6':'45', 's7':'429', 's8':'42',},
'42': {'s1':'85', 's2':'298', 's3':'59', 's4':'44', 's5':'46', 's6':'46', 's7':'430', 's8':'43',},
'43': {'s1':'86', 's2':'299', 's3':'60', 's4':'45', 's5':'47', 's6':'47', 's7':'431', 's8':'44',},
'44': {'s1':'87', 's2':'300', 's3':'61', 's4':'46', 's5':'48', 's6':'48', 's7':'432', 's8':'45',},
'45': {'s1':'88', 's2':'301', 's3':'62', 's4':'47', 's5':'49', 's6':'49', 's7':'433', 's8':'46',},
'46': {'s1':'89', 's2':'302', 's3':'63', 's4':'48', 's5':'50', 's6':'50', 's7':'434', 's8':'47',},
'47': {'s1':'90', 's2':'303', 's3':'64', 's4':'49', 's5':'51', 's6':'51', 's7':'435', 's8':'48',},
'48': {'s1':'91', 's2':'304', 's3':'65', 's4':'50', 's5':'52', 's6':'52', 's7':'436', 's8':'49',},
'49': {'s1':'92', 's2':'305', 's3':'66', 's4':'51', 's5':'53', 's6':'53', 's7':'437', 's8':'50',},
'50': {'s1':'93', 's2':'306', 's3':'67', 's4':'52', 's5':'54', 's6':'54', 's7':'438', 's8':'51',},
'51': {'s1':'94', 's2':'307', 's3':'68', 's4':'53', 's5':'55', 's6':'55', 's7':'439', 's8':'52',},
'52': {'s1':'95', 's2':'308', 's3':'69', 's4':'54', 's5':'56', 's6':'56', 's7':'440', 's8':'53',},
'53': {'s1':'96', 's2':'309', 's3':'70', 's4':'55', 's5':'57', 's6':'57', 's7':'441', 's8':'54',},
'54': {'s1':'97', 's2':'310', 's3':'71', 's4':'56', 's5':'58', 's6':'58', 's7':'442', 's8':'55',},
'55': {'s1':'98', 's2':'311', 's3':'72', 's4':'57', 's5':'59', 's6':'59', 's7':'443', 's8':'56',},
'56': {'s1':'99', 's2':'312', 's3':'73', 's4':'58', 's5':'60', 's6':'60', 's7':'444', 's8':'57',},
'57': {'s1':'100', 's2':'313', 's3':'74', 's4':'59', 's5':'61', 's6':'61', 's7':'445', 's8':'58',},
'58': {'s1':'101', 's2':'314', 's3':'75', 's4':'60', 's5':'62', 's6':'62', 's7':'446', 's8':'59',},
'59': {'s1':'102', 's2':'315', 's3':'76', 's4':'61', 's5':'63', 's6':'63', 's7':'447', 's8':'60',},
'60': {'s1':'103', 's2':'316', 's3':'77', 's4':'62', 's5':'64', 's6':'64', 's7':'448', 's8':'61',},
# The inline keyboard pagination starts at entry 61.
'61': {'s1':'104', 's2':'317', 's3':'78', 's4':'63', 's5':'65', 's6':'65', 's7':'449', 's8':'62',},
'62': {'s1':'105', 's2':'318', 's3':'79', 's4':'64', 's5':'66', 's6':'66', 's7':'450', 's8':'63',},
'63': {'s1':'106', 's2':'319', 's3':'80', 's4':'65', 's5':'67', 's6':'67', 's7':'451', 's8':'64',},
'64': {'s1':'107', 's2':'320', 's3':'81', 's4':'66', 's5':'68', 's6':'68', 's7':'452', 's8':'65',},
'65': {'s1':'108', 's2':'321', 's3':'82', 's4':'67', 's5':'69', 's6':'69', 's7':'453', 's8':'66',},
'66': {'s1':'109', 's2':'322', 's3':'83', 's4':'68', 's5':'70', 's6':'70', 's7':'454', 's8':'67',},
'67': {'s1':'110', 's2':'323', 's3':'84', 's4':'69', 's5':'71', 's6':'72', 's7':'455', 's8':'68',},
'68': {'s1':'111', 's2':'324', 's3':'85', 's4':'70', 's5':'72', 's6':'73', 's7':'456', 's8':'69',},
'69': {'s1':'112', 's2':'325', 's3':'86', 's4':'71', 's5':'73', 's6':'74', 's7':'457', 's8':'70',},
'70': {'s1':'113', 's2':'326', 's3':'87', 's4':'72', 's5':'74', 's6':'75', 's7':'458', 's8':'71',},
'71': {'s1':'114', 's2':'327', 's3':'88', 's4':'73', 's5':'75', 's6':'76', 's7':'459', 's8':'72',},
'72': {'s1':'115', 's2':'328', 's3':'89', 's4':'74', 's5':'76', 's6':'77', 's7':'460', 's8':'73',},
'73': {'s1':'116', 's2':'329', 's3':'90', 's4':'75', 's5':'77', 's6':'78', 's7':'461', 's8':'74',},
'74': {'s1':'117', 's2':'330', 's3':'91', 's4':'76', 's5':'78', 's6':'79', 's7':'462', 's8':'75',},
'75': {'s1':'118', 's2':'331', 's3':'92', 's4':'77', 's5':'79', 's6':'80', 's7':'463', 's8':'76',},
'76': {'s1':'119', 's2':'332', 's3':'93', 's4':'78', 's5':'80', 's6':'81', 's7':'464', 's8':'77',},
'77': {'s1':'120', 's2':'333', 's3':'94', 's4':'79', 's5':'81', 's6':'82', 's7':'465', 's8':'78',},
'78': {'s1':'121', 's2':'334', 's3':'95', 's4':'80', 's5':'82', 's6':'83', 's7':'466', 's8':'79',},
'79': {'s1':'122', 's2':'335', 's3':'96', 's4':'81', 's5':'83', 's6':'84', 's7':'467', 's8':'80',},
'80': {'s1':'123', 's2':'336', 's3':'97', 's4':'82', 's5':'84', 's6':'85', 's7':'468', 's8':'81',},
'81': {'s1':'124', 's2':'337', 's3':'98', 's4':'83', 's5':'85', 's6':'86', 's7':'469', 's8':'82',},
'82': {'s1':'125', 's2':'338', 's3':'99', 's4':'84', 's5':'86', 's6':'87', 's7':'470', 's8':'83',},
'83': {'s1':'126', 's2':'339', 's3':'100', 's4':'85', 's5':'87', 's6':'88', 's7':'471', 's8':'84',},
'84': {'s1':'127', 's2':'340', 's3':'101', 's4':'86', 's5':'88', 's6':'89', 's7':'472', 's8':'85',},
'85': {'s1':'128', 's2':'341', 's3':'102', 's4':'87', 's5':'89', 's6':'90', 's7':'473', 's8':'86',},
'86': {'s1':'129', 's2':'342', 's3':'103', 's4':'88', 's5':'90', 's6':'91', 's7':'474', 's8':'87',},
'87': {'s1':'130', 's2':'343', 's3':'104', 's4':'89', 's5':'91', 's6':'92', 's7':'475', 's8':'88',},
'88': {'s1':'131', 's2':'344', 's3':'105', 's4':'90', 's5':'92', 's6':'93', 's7':'476', 's8':'89',},
'89': {'s1':'132', 's2':'345', 's3':'106', 's4':'91', 's5':'93', 's6':'94', 's7':'477', 's8':'90',},
'90': {'s1':'133', 's2':'346', 's3':'107', 's4':'92', 's5':'94', 's6':'95', 's7':'478', 's8':'91',},
'91': {'s1':'134', 's2':'347', 's3':'108', 's4':'93', 's5':'95', 's6':'96', 's7':'479', 's8':'92',},
'92': {'s1':'135', 's2':'348', 's3':'109', 's4':'94', 's5':'96', 's6':'97', 's7':'480', 's8':'93',},
'93': {'s1':'136', 's2':'349', 's3':'110', 's4':'95', 's5':'97', 's6':'98', 's7':'481', 's8':'94',},
'94': {'s1':'137', 's2':'350', 's3':'111', 's4':'96', 's5':'98', 's6':'99', 's7':'482', 's8':'95',},
'95': {'s1':'138', 's2':'351', 's3':'112', 's4':'97', 's5':'99', 's6':'100', 's7':'483', 's8':'96',},
'96': {'s1':'139', 's2':'352', 's3':'113', 's4':'98', 's5':'100', 's6':'101', 's7':'484', 's8':'97',},
'97': {'s1':'140', 's2':'353', 's3':'114', 's4':'99', 's5':'101', 's6':'102', 's7':'485', 's8':'98',},
'98': {'s1':'141', 's2':'354', 's3':'115', 's4':'100', 's5':'102', 's6':'103', 's7':'486', 's8':'99',},
'99': {'s1':'142', 's2':'355', 's3':'116', 's4':'101', 's5':'103', 's6':'104', 's7':'487', 's8':'100',},
'100': {'s1':'143', 's2':'356', 's3':'117', 's4':'102', 's5':'104', 's6':'105', 's7':'488', 's8':'101',},
'101': {'s1':'144', 's2':'357', 's3':'118', 's4':'103', 's5':'105', 's6':'106', 's7':'489', 's8':'102',},
'102': {'s1':'145', 's2':'358', 's3':'119', 's4':'104', 's5':'106', 's6':'107', 's7':'490', 's8':'103',},
'103': {'s1':'146', 's2':'359', 's3':'120', 's4':'105', 's5':'107', 's6':'108', 's7':'491', 's8':'104',},
'104': {'s1':'147', 's2':'360', 's3':'121', 's4':'106', 's5':'108', 's6':'109', 's7':'492', 's8':'105',},
'105': {'s1':'148', 's2':'361', 's3':'122', 's4':'107', 's5':'109', 's6':'110', 's7':'493', 's8':'106',},
'106': {'s1':'149', 's2':'362', 's3':'123', 's4':'108', 's5':'110', 's6':'111', 's7':'494', 's8':'107',},
'107': {'s1':'150', 's2':'363', 's3':'124', 's4':'109', 's5':'111', 's6':'112', 's7':'495', 's8':'108',},
'108': {'s1':'151', 's2':'364', 's3':'125', 's4':'110', 's5':'112', 's6':'113', 's7':'496', 's8':'109',},
'109': {'s1':'152', 's2':'365', 's3':'126', 's4':'111', 's5':'113', 's6':'114', 's7':'497', 's8':'110',},
'110': {'s1':'153', 's2':'366', 's3':'127', 's4':'112', 's5':'114', 's6':'115', 's7':'498', 's8':'111',},
'111': {'s1':'154', 's2':'367', 's3':'128', 's4':'113', 's5':'115', 's6':'116', 's7':'499', 's8':'112',},
'112': {'s1':'155', 's2':'368', 's3':'129', 's4':'114', 's5':'116', 's6':'117', 's7':'500', 's8':'113',},
'113': {'s1':'156', 's2':'369', 's3':'130', 's4':'115', 's5':'117', 's6':'118', 's7':'501', 's8':'114',},
'114': {'s1':'157', 's2':'370', 's3':'131', 's4':'116', 's5':'118', 's6':'119', 's7':'502', 's8':'115',}
}
# NOTE: the nested dict below repeats the key '55'; in a Python dict literal
# later duplicates silently overwrite earlier ones, so only '555' survives.
bbc = {'22':'11', 'n':{
'55':'56',
'55':'58',
'55':'59',
'55':'555',
}}
# Message-ID ranges per menu page: 'rep' is the start and 'rep2' the end
# (assumed meaning, inferred from the channel message IDs above).
hmuchun = {
'hm5':{
'rep':257,
'rep2':287,
},
'hm6':{
'rep':288,
'rep2':317,
},
'hm7':{
'rep':317,
'rep2':347,
},
'hm8':{
'rep':347,
'rep2':371,
},
'hm9':{
'rep':18,
'rep2':48,
},
'hm10':{
'rep':48,
'rep2':78,
},
'hm11':{
'rep':78,
'rep2':108,
},
'hm12':{
'rep':108,
'rep2':137,
},
}
| pipuchun/jsonuz.py | 13,364 | s4 teng https://t.me/shuraim1/ s5 teng https://t.me/alquran30juzsaadalghamidi/5 s6 teng https://t.me/bandar_abdulaziz_balilah/5 s7 teng https://t.me/Idriss_Akbar/388 s8 teng https://t.me/yasseraldosari_mp3/2 61 inlinekeyboard starts in here | 243 | en | 0.43409 |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common conv layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lingvo.core import base_layer
from lingvo.core import bn_layers
from lingvo.core import py_utils
from lingvo.core import tshape
def ComputeConvOutputShape(in_shape,
t_stride,
f_stride,
outc=None,
padding='SAME'):
"""Computes output shape for convolution and pooling layers.
If `in_shape` is a dynamic shape, the output will be Tensors, while if
`in_shape` is a list of ints then the output will also be a list of ints.
Args:
in_shape: A length 4 Tensor or list representing the input shape.
t_stride: The stride along the time dimension.
f_stride: The stride along the frequency dimension.
outc: The expected output channel. If None, will use the input channel.
padding: 'SAME' or 'VALID'.
Returns:
The expected output shape.
"""
# In the order of batch, time, frequency, channel
n = in_shape[0]
t = in_shape[1]
f = in_shape[2]
c = in_shape[3]
# Last two dimensions has to be specified.
assert f is not None and c is not None
if padding == 'VALID':
if t:
t -= t_stride - 1
f -= f_stride - 1
ot = t
if ot is not None:
ot = (ot + t_stride - 1) // t_stride
of = (f + f_stride - 1) // f_stride
if outc is None:
outc = c
return [n, ot, of, outc]
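# A worked example (shapes assumed for illustration): with the default SAME
# padding, ComputeConvOutputShape([8, 100, 80, 3], t_stride=2, f_stride=2,
# outc=32) returns [8, 50, 40, 32], i.e. each strided dim becomes
# ceil(dim / stride).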
def ComputeConvOutputPadding(paddings, window, stride,
padding_algorithm='SAME'):
"""Computes paddings for convolution and pooling output.
out_padding[i] == 1 iff any in_padding corresponding to that output is 1.
Args:
paddings: The paddings tensor. It is expected to be of shape [batch, time].
window: The size of the windows.
stride: The time-stride between adjacent windows.
padding_algorithm: 'SAME' or 'VALID'.
Returns:
out_padding, The new padding tensor of size [batch, ceil(time / stride)].
"""
if stride == 1:
return paddings
# Pad so input_length divides stride.
input_length = py_utils.GetShape(paddings)[1]
pad_len = (input_length + stride - 1) // stride * stride - input_length
paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0)
out_padding = tf.nn.pool(
tf.expand_dims(paddings, -1),
[window],
'MAX',
padding_algorithm,
strides=[stride],
)
return tf.squeeze(out_padding, -1)
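# A worked example (values assumed): for paddings [[0., 0., 0., 1., 1.]] with
# window=2 and stride=2, the row is right-padded with 1.0 to length 6 and
# max-pooled to [[0., 1., 1.]] -- an output step is padded iff any input step
# under its window was padded.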
class BaseConv2DLayerWithPadding(base_layer.BaseLayer):
"""Base class for 2D convolution layers."""
@classmethod
def Params(cls):
p = super(BaseConv2DLayerWithPadding, cls).Params()
p.Define(
'filter_shape', (0, 0, 0, 0),
'Filter shape. Must be a sequence of length 4. Elements are in'
' the order of height (time), width (frequency), in_channel,'
' out_channel. For causal convolution, filter_shape[0]'
' is the actual number of trained weights in the time dimension'
' of the kernel.')
p.Define(
'filter_stride', (1, 1),
'Filter stride to use. Must be a pair of ints. The first int'
' specifies the stride on the time dimension. The second int'
' specifies the stride on the frequency dimension.')
p.Define(
'dilation_rate', (1, 1),
'If > 1, dilation rate for atrous convolution. '
'Must be a pair of ints. '
'The first int specifies the dilation rate on the time dimension. '
'The second int specifies the dilation rate on the frequency '
'dimension. '
'If any value of dilation_rate is > 1, then all values of strides '
'must be 1.')
p.Define(
'weight_norm', False,
'If true, apply weight normalization to weights as proposed by'
' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868')
return p
@base_layer.initializer
def __init__(self, params):
super(BaseConv2DLayerWithPadding, self).__init__(params)
p = self.params
assert p.name
assert len(p.filter_shape) == 4
assert len(p.filter_stride) == 2
assert all(x > 0 for x in p.filter_shape)
assert all(x > 0 for x in p.filter_stride)
assert len(p.dilation_rate) == 2
assert all(x > 0 for x in p.dilation_rate)
# Dilation and stride can't be combined.
if any(x > 1 for x in p.dilation_rate):
assert all(x == 1 for x in p.filter_stride)
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
raise NotImplementedError()
@property
def input_channels(self):
"""The number of input channels for this conv layer."""
return self.params.filter_shape[2]
def OutShape(self, in_shape):
"""Compute the output shape given the input shape."""
p = self.params
return ComputeConvOutputShape(in_shape, p.filter_stride[0],
p.filter_stride[1], self.output_channels)
def FProp(self, theta, inputs, paddings):
"""Apply convolution to inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. It is expected to be of shape [batch, time,
frequency, channel]. The time dimension corresponds to the height
dimension as in images and the frequency dimension corresponds to the
width dimension as in images.
paddings: The paddings tensor, expected to be of shape [batch, time].
Returns:
outputs, out_paddings pair.
"""
p = self.params
with tf.name_scope(p.name):
inputs = py_utils.with_dependencies([
py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),
py_utils.assert_shape_match(
tf.shape(inputs),
tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0))
], inputs)
def _ApplyPadding(tensor_in, padding_in):
padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1)
return tensor_in * (1.0 - padding_expanded)
# Zeroing out padded inputs.
inputs = _ApplyPadding(inputs, paddings)
# Evaluate the conv kernel on 'inputs'.
out = self._EvaluateConvKernel(theta, inputs)
# NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1.
# But there's likely no real problems. Trying to set it gives an error:
# pooling with SAME padding is not implemented for dilation_rate > 1.
# NOTE: we use window=p.filter_stride[0] to be compatible with legacy
# implementation. Consider updating it to be the actual shape.
conv_padding = ComputeConvOutputPadding(
paddings, window=p.filter_stride[0], stride=p.filter_stride[0])
# Assuming padded nodes will be properly zero-ed out if necessary by
# sub-sequent layers.
# out = _ApplyPadding(out, conv_padding)
out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs)))
return out, conv_padding
def _EvaluateConvKernel(self, theta, conv_input):
"""Evaluate the convolution kernel on input 'conv_input'."""
raise NotImplementedError
class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding):
"""Conv2D layer."""
@base_layer.initializer
def __init__(self, params):
super(Conv2DLayerWithPadding, self).__init__(params)
p = self.params
assert p.name
w_pc = py_utils.WeightParams(
shape=p.filter_shape,
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
with tf.variable_scope(p.name):
self.CreateVariable('w', w_pc)
if p.weight_norm:
self.CreateVariable(
'g',
py_utils.WeightParams(
shape=[p.filter_shape[-1]],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars']))
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
return p.filter_shape[-1]
def _GetWeight(self, theta):
p = self.params
if p.weight_norm:
# Normalize along the last dim (standard conv).
filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape(
(theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]])
else:
filter_w = theta.w
return filter_w
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.convolution(
inputs,
filter_w,
strides=p.filter_stride,
dilation_rate=p.dilation_rate,
data_format='NHWC',
padding='SAME')
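# A minimal construction sketch (values assumed; standard lingvo
# Params/Instantiate flow):
#   p = Conv2DLayerWithPadding.Params()
#   p.name = 'conv'
#   p.filter_shape = (3, 3, 1, 8)   # 3x3 kernel, 1 input channel, 8 filters
#   p.filter_stride = (2, 2)
#   layer = p.Instantiate()
#   # out, out_paddings = layer.FProp(layer.theta, inputs, paddings)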
class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding):
"""2D conv layer with causal dependency on the time axis."""
@base_layer.initializer
def __init__(self, params):
super(CausalConv2DLayerWithPadding, self).__init__(params)
p = self.params
assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.'
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'
# Use VALID padding and shift the inputs to the right to ensure that the
# first output only depends on the first input and so on. The output is
# the same size as the input, as if the convolution used SAME padding.
padding_algorithm = 'VALID'
# The effective spatial filter width for dilated convolutions is
# (kernel_width - 1) * dilation_rate + 1 as according to
# https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.convolution(
inputs,
filter_w,
strides=p.filter_stride,
dilation_rate=p.dilation_rate,
data_format='NHWC',
padding=padding_algorithm)
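# A worked example (values assumed): with filter_shape[0]=3 and
# dilation_rate[0]=2, causal_pad_size = (3 - 1) * 2 = 4, so four zero frames
# are prepended and the VALID convolution at output step t sees only input
# steps <= t.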
class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding):
"""Depthwise conv 2D layer.
paper: https://arxiv.org/abs/1610.02357
"""
@classmethod
def Params(cls):
p = super(DepthwiseConv2DLayer, cls).Params()
# Redefine 'filter_shape' since the semantic of shape elements is different
# from regular Conv2D.
p.Delete('filter_shape')
p.Define(
'filter_shape', (0, 0, 0, 0),
'Filter shape. Must be a sequence of length 4. Elements are in'
' the order of height (time), width (frequency), in_channel,'
' channel_multipliers. ')
return p
@base_layer.initializer
def __init__(self, params):
super(DepthwiseConv2DLayer, self).__init__(params)
p = self.params
assert p.name
w_pc = py_utils.WeightParams(
shape=p.filter_shape,
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
with tf.variable_scope(p.name):
self.CreateVariable('w', w_pc)
if p.weight_norm:
self.CreateVariable(
'g',
py_utils.WeightParams(
shape=[p.filter_shape[2], p.filter_shape[3]],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars']))
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
# Depthwise convolution filter shape is:
# [..., in_channels, channel_multiplier].
return p.filter_shape[2] * p.filter_shape[3]
def _GetWeight(self, theta):
p = self.params
if p.weight_norm:
# Normalize along the last two dims.
filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape(
(theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]])
else:
filter_w = theta.w
return filter_w
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(
inputs,
filter_w,
strides=[1, p.filter_stride[0], p.filter_stride[1], 1],
rate=p.dilation_rate,
data_format='NHWC',
padding='SAME')
class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer):
"""Depthwise conv layer with causal dependency on the time axis."""
@base_layer.initializer
def __init__(self, params):
super(CausalDepthwiseConv2DLayer, self).__init__(params)
p = self.params
assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.'
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'
# Use VALID padding and shift the inputs to the right to ensure that the
# first output only depends on the first input and so on. The output is
# the same size as the input, as if the convolution used SAME padding.
padding_algorithm = 'VALID'
# The effective spatial filter width for dilated convolutions is
# (kernel_width - 1) * dilation_rate + 1 as according to
# https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(
inputs,
filter_w,
strides=[1, p.filter_stride[0], p.filter_stride[1], 1],
rate=p.dilation_rate,
data_format='NHWC',
padding=padding_algorithm)
class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer):
"""DepthwiseConv2DLayer where weights are normalized over the time dim.
https://arxiv.org/abs/1901.10430
"""
@classmethod
def Params(cls):
p = super(NormalizedDepthwiseConv2DLayer, cls).Params()
p.Define('dropconnect_prob', 0.0,
'Prob at which DropConnect regularization is performed.')
    p.Define('deterministic_dropout', False, 'Use deterministic dropout or not.')
p.Define('temperature', 1.0,
'Temperature for the softmax normalization of the weights.')
p.Define('weight_tiling_factor', 1,
'Number of times weights are tiled over the input channels.')
return p
@base_layer.initializer
def __init__(self, params):
super(NormalizedDepthwiseConv2DLayer, self).__init__(params)
p = self.params
assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.'
assert p.temperature > 0.0, 'Absolute zero temperature is not possible.'
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
# Depthwise convolution filter shape is:
# [kernel_size, 1, in_channels, channel_multiplier].
return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor
@property
def input_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
return p.filter_shape[2] * p.weight_tiling_factor
def _GetWeight(self, theta):
p = self.params
filter_w = theta.w
# First normalize filter_w over the temporal dimension here.
filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0)
# Add dropconnect on the weights for regularization.
if p.dropconnect_prob > 0.0 and not p.is_eval:
if p.deterministic_dropout:
filter_w = py_utils.DeterministicDropout(
filter_w, 1.0 - p.dropconnect_prob,
py_utils.GenerateStepSeedPair(p, theta.global_step))
else:
filter_w = tf.nn.dropout(
filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed)
# Tie the parameters of every subsequent number of weight_tiling_factor
# channels.
filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1])
return filter_w
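# A small illustration (values assumed): with filter_shape = (3, 1, 2, 1) and
# weight_tiling_factor = 4, the softmax makes each kernel column a distribution
# over the 3 taps, and tiling yields a (3, 1, 8, 1) filter in which the 8 input
# channels reuse the 2 original channels' parameters.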
@classmethod
def FPropMeta(cls, p, inputs, paddings):
py_utils.CheckShapes((inputs, paddings))
b, t, f, ic = inputs
assert f == 1
oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor
outputs = tshape.Shape([b, t, f, oc])
flops = b * t * f * p.filter_shape[0] * ic * oc * 5
return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings))
class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer):
"""Depthwise conv layer with causal dependency on the time axis."""
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
# Same as CausalDepthwiseConv2DLayer.
p = self.params
assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(
inputs,
filter_w,
strides=[1, p.filter_stride[0], p.filter_stride[1], 1],
rate=p.dilation_rate,
data_format='NHWC',
padding=padding_algorithm)
class ConvBatchNormLayer(bn_layers.BatchNormLayer):
"""A wrapper around regular BatchNormLayer that pass around the ...
paddings layers.
"""
def FProp(self, theta, inputs, paddings):
paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1)
bned = super(ConvBatchNormLayer, self).FProp(
theta, inputs, paddings_expanded)
return bned, paddings
# Supported activation functions.
_ACTIVATIONS = {
'RELU': tf.nn.relu,
'RELU6': tf.nn.relu6,
'SIGMOID': tf.sigmoid,
'TANH': tf.tanh,
'SWISH': tf.nn.swish,
'NONE': tf.identity,
}
class ActivationLayer(base_layer.BaseLayer):
"""Applies activation function to the inputs."""
@classmethod
def Params(cls):
p = super(ActivationLayer, cls).Params()
p.Define('activation', 'RELU',
'The activation function to apply')
return p
def FProp(self, theta, inputs, paddings):
p = self.params
out = _ACTIVATIONS[p.activation](inputs)
return out, paddings
class PaddingLayer(base_layer.BaseLayer):
"""Zeros out padded positions."""
def FProp(self, theta, inputs, paddings):
paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1)
return inputs * (1.0 - paddings_expanded), paddings
| lingvo/core/conv_layers_with_time_padding.py | 19,230 | Applies activation function to the inputs.
Base class for 2D convolution layers.
2D conv layer with causal dependency on the time axis.
Depthwise conv layer with causal dependency on the time axis.
Depthwise conv layer with causal dependency on the time axis.
Conv2D layer.
A wrapper around the regular BatchNormLayer that passes the paddings through.
Depthwise conv 2D layer.
paper: https://arxiv.org/abs/1610.02357
DepthwiseConv2DLayer where weights are normalized over the time dim.
https://arxiv.org/abs/1901.10430
Zeros out padded positions.
Computes paddings for convolution and pooling output.
out_padding[i] == 1 iff any in_padding corresponding to that output is 1.
Args:
paddings: The paddings tensor. It is expected to be of shape [batch, time].
window: The size of the windows.
stride: The time-stride between adjacent windows.
padding_algorithm: 'SAME' or 'VALID'.
Returns:
out_padding, The new padding tensor of size [batch, ceil(time / stride)].
Computes output shape for convolution and pooling layers.
If `in_shape` is a dynamic shape, the output will be Tensors, while if
`in_shape` is a list of ints then the output will also be a list of ints.
Args:
in_shape: A length 4 Tensor or list representing the input shape.
t_stride: The stride along the time dimension.
f_stride: The stride along the frequency dimension.
outc: The expected output channel. If None, will use the input channel.
padding: 'SAME' or 'VALID'.
Returns:
The expected output shape.
Apply convolution to inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. It is expected to be of shape [batch, time,
frequency, channel]. The time dimension corresponds to the height
dimension as in images and the frequency dimension corresponds to the
width dimension as in images.
paddings: The paddings tensor, expected to be of shape [batch, time].
Returns:
outputs, out_paddings pair.
Compute the output shape given the input shape.
Evaluate the convolution kernel on input 'conv_input'.
Apply convolution to inputs.
Apply convolution to inputs.
Apply convolution to inputs.
Apply convolution to inputs.
Apply convolution to inputs.
The number of input channels for this conv layer.
The number of output channels for this conv layer.
The number of output channels for this conv layer.
The number of output channels for this conv layer.
The number of output channels for this conv layer.
The number of output channels for this conv layer.
Common conv layers.
Lint as: python2, python3 Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== In the order of batch, time, frequency, channel Last two dimensions has to be specified. Pad so input_length divides stride. Dilation and stride can't be combined. Zeroing out padded inputs. Evaluate the conv kernel on 'inputs'. NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1. But there's likely no real problems. Trying to set it gives an error: pooling with SAME padding is not implemented for dilation_rate > 1. NOTE: we use window=p.filter_stride[0] to be compatible with legacy implementation. Consider updating it to be the actual shape. Assuming padded nodes will be properly zero-ed out if necessary by sub-sequent layers. out = _ApplyPadding(out, conv_padding) Normalize along the last dim (standard conv). Use VALID padding and shift the inputs to the right to ensure that the first output only depends on the first input and so on. The output is the same size as the input, as if the convolution used SAME padding. The effective spatial filter width for dilated convolutions is (kernel_width - 1) * dilation_rate + 1 as according to https://www.tensorflow.org/api_docs/python/tf/nn/convolution. Redefine 'filter_shape' since the semantic of shape elements is different from regular Conv2D. Depthwise convolution filter shape is: [..., in_channels, channel_multiplier]. Normalize along the last two dims. Use VALID padding and shift the inputs to the right to ensure that the first output only depends on the first input and so on. The output is the same size as the input, as if the convolution used SAME padding. The effective spatial filter width for dilated convolutions is (kernel_width - 1) * dilation_rate + 1 as according to https://www.tensorflow.org/api_docs/python/tf/nn/convolution. Depthwise convolution filter shape is: [kernel_size, 1, in_channels, channel_multiplier]. First normalize filter_w over the temporal dimension here. Add dropconnect on the weights for regularization. Tie the parameters of every subsequent number of weight_tiling_factor channels. Same as CausalDepthwiseConv2DLayer. Supported activation functions. | 5,342 | en | 0.787055 |
import torch
from recstudio.ann import sampler
from recstudio.data import dataset
from recstudio.model import basemodel, loss_func, scorer
r"""
HGN
########
Paper Reference:
Chen Ma, et al. "HGN: Hierarchical Gating Networks for Sequential Recommendation" in KDD 2019.
https://dl.acm.org/doi/abs/10.1145/3292500.3330984
"""
class HGNQueryEncoder(torch.nn.Module):
def __init__(self, fuid, fiid, num_users, embed_dim, max_seq_len, item_encoder, pooling_type='mean') -> None:
super().__init__()
self.fuid = fuid
self.fiid = fiid
self.item_encoder = item_encoder
self.pooling_type = pooling_type
self.user_embedding = torch.nn.Embedding(num_users, embed_dim, 0)
self.W_g_1 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.W_g_2 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.b_g = torch.nn.Parameter(torch.empty(embed_dim), requires_grad=True)
self.w_g_3 = torch.nn.Linear(embed_dim, 1, bias=False)
self.W_g_4 = torch.nn.Linear(embed_dim, max_seq_len)
def forward(self, batch):
U = self.user_embedding(batch[self.fuid])
S = self.item_encoder(batch['in_'+self.fiid])
S_F = S * torch.sigmoid(self.W_g_1(S) + self.W_g_2(U).view(U.size(0), 1, -1) + self.b_g)
weight = torch.sigmoid(self.w_g_3(S_F) + (U@self.W_g_4.weight[:S.size(1)].T).view(U.size(0), -1, 1)) # BxLx1
S_I = S_F * weight
if self.pooling_type == 'mean':
s = S_I.sum(1) / weight.sum(1)
elif self.pooling_type == 'max':
s = torch.max(S_I, dim=1).values
else:
            raise ValueError("`pooling_type` only supports `mean` and `max`")
query = U + s + S.sum(1)
return query
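# A minimal shape walk-through (assumed values, for illustration only):
#   encoder = HGNQueryEncoder('user_id', 'item_id', num_users=100, embed_dim=64,
#                             max_seq_len=20,
#                             item_encoder=torch.nn.Embedding(1000, 64, 0))
# For a batch of B users with L <= 20 history items, the feature gate S_F is
# (B, L, 64), the instance-gate weight is (B, L, 1), and the returned query is
# (B, 64): user embedding + gated pooling + sum of the raw item embeddings.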
class HGN(basemodel.BaseRetriever):
r"""HGN proposes a hierarchical gating network, integrated with the Bayesian Personalized Ranking
(BPR) to capture both the long-term and short-term user interests. HGN consists of a feature
gating module, an instance gating module, and an item-item product module."""
def _get_dataset_class(self):
r"""The dataset is SeqDataset."""
return dataset.SeqDataset
def _get_query_encoder(self, train_data):
return HGNQueryEncoder(self.fuid, self.fiid, train_data.num_users, self.embed_dim, \
train_data.config['max_seq_len'], self.item_encoder, self.config['pooling_type'])
def _get_scorer_func(self):
return scorer.InnerProductScorer()
def _get_loss_func(self):
r"""BPR loss is used."""
return loss_func.BPRLoss()
def _get_sampler(self, train_data):
return sampler.UniformSampler(train_data.num_items-1)
| recstudio/model/seq/hgn.py | 2,723 | HGN proposes a hierarchical gating network, integrated with the Bayesian Personalized Ranking
(BPR) to capture both the long-term and short-term user interests. HGN consists of a feature
gating module, an instance gating module, and an item-item product module.
The dataset is SeqDataset.
BPR loss is used.
BxLx1 | 314 | en | 0.819695 |
import time
import board
import debouncer
import busio as io
import digitalio
import pulseio
import adafruit_ssd1306
i2c = io.I2C(board.SCL, board.SDA)
reset_pin = digitalio.DigitalInOut(board.D11)
oled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, reset=reset_pin)
button_select = debouncer.Debouncer(board.D7, mode=digitalio.Pull.UP)
button_play = debouncer.Debouncer(board.D9, mode=digitalio.Pull.UP)
C4 = 261
C_SH_4 = 277
D4 = 293
D_SH_4 = 311
E4 = 329
F4 = 349
F_SH_4 = 369
G4 = 392
G_SH_4 = 415
A4 = 440
A_SH_4 = 466
B4 = 493
# pylint: disable=line-too-long
songbook = {'Twinkle Twinkle': [(C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5),
(F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5),
(F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5)],
'ItsyBitsy Spider': [(G4, 0.5), (C4, 0.5), (C4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (C4, 0.5), (0, 0.5),
(E4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (E4, 0.5), (0, 0.5)],
'Old MacDonald': [(G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5),
(D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5),
(D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5)]
}
# pylint: enable=line-too-long
def play_note(note):
if note[0] != 0:
pwm = pulseio.PWMOut(board.D12, duty_cycle = 0, frequency=note[0])
# Hex 7FFF (binary 0111111111111111) is half of the largest value for a 16-bit int,
# i.e. 50%
pwm.duty_cycle = 0x7FFF
time.sleep(note[1])
if note[0] != 0:
pwm.deinit()
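# For example, play_note((A4, 0.5)) sounds 440 Hz for half a second, while
# play_note((0, 0.5)) is a half-second rest (no PWM is started for pitch 0).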
def play_song(songname):
for note in songbook[songname]:
play_note(note)
def update(songnames, selected):
oled.fill(0)
line = 0
for songname in songnames:
if line == selected:
oled.text(">", 0, line * 8)
oled.text(songname, 10, line * 8)
line += 1
oled.show()
selected_song = 0
song_names = sorted(list(songbook.keys()))
while True:
button_select.update()
button_play.update()
update(song_names, selected_song)
if button_select.fell:
print("select")
selected_song = (selected_song + 1) % len(songbook)
elif button_play.fell:
print("play")
play_song(song_names[selected_song])
| CircuitPython_101/basic_data_structures/song_book/code.py | 3,729 | pylint: disable=line-too-long pylint: enable=line-too-long Hex 7FFF (binary 0111111111111111) is half of the largest value for a 16-bit int, i.e. 50% | 149 | en | 0.598127 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-23 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('voting', '0002_auto_20170223_1054'),
]
operations = [
migrations.RemoveField(
model_name='voteballot',
name='vote_candidate',
),
migrations.AddField(
model_name='voteballot',
name='candidates',
field=models.ManyToManyField(related_name='vote_ballot', to='voting.Candidate', verbose_name="Vote's Candidate"),
),
migrations.AddField(
model_name='voteballot',
name='position',
field=models.CharField(choices=[('P', 'President'), ('A', 'VP of Administration'), ('T', 'Treasurer'), ('S', 'VP of Service'), ('N', 'VP of New Member Services'), ('O', 'VP of Social Affairs'), ('J', 'VP of Standards'), ('R', 'Risk Management'), ('B', 'Standards Board')], default='P', max_length=1),
),
]
| texaslan/voting/migrations/0003_auto_20170223_1207.py | 1,063 | -*- coding: utf-8 -*- Generated by Django 1.10 on 2017-02-23 18:07 | 66 | en | 0.759516 |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Mantle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from decimal import Decimal
from io import BytesIO
import math
from test_framework.test_framework import MantleTestFramework
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
MAX_MONEY,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
hex_str_to_bytes,
)
class MempoolAcceptanceTest(MantleTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex','-permitbaremultisig=0',
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
assert_equal(node.getblockcount(), 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
coins = node.listunspent()
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = coins.pop() # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = Decimal('0.000007')
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): Decimal('0.3') - fee}],
))['hex']
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee}}],
rawtxs=[raw_tx_0],
)
self.log.info('A final transaction not in the mempool')
coin = coins.pop() # Pick a random coin(base) to spend
output_amount = Decimal('0.025')
raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout'], "sequence": 0xffffffff}], # SEQUENCE_FINAL
outputs=[{node.getnewaddress(): output_amount}],
locktime=node.getblockcount() + 2000, # Can be anything
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_final)))
fee_expected = coin['amount'] - output_amount
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee_expected}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
node.sendrawtransaction(hexstring=raw_tx_final, maxfeerate=0)
self.mempool_size += 1
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size += 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': (2 * fee)}}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
# take original raw_tx_0
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'txn-mempool-conflict'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with missing inputs, that never existed')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, maxfeerate=0)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': { 'base': Decimal('0.1') - Decimal('0.05')}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with no outputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(tx.serialize().hex())['hex'])))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-empty'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A really large transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with negative output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-negative'}],
rawtxs=[tx.serialize().hex()],
)
# The following two validations prevent overflow of the output amounts (see CVE-2010-5139).
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = MAX_MONEY + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = MAX_MONEY
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-txouttotal-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with duplicate inputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-inputs-duplicate'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'coinbase'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('Some nonstandard transactions')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'version'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptpubkey'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
tx.vout[0].scriptPubKey = CScript([OP_2, pubkey, pubkey, pubkey, OP_3, OP_CHECKMULTISIG]) # Some bare multisig script (2-of-3)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bare-multisig'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-not-pushonly'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([b'a' * 1648]) # Some too large scriptSig (>1650 bytes)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-size'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'tx-size'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'dust'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'multi-op-return'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A timelocked transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-final'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-BIP68-final'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
if __name__ == '__main__':
MempoolAcceptanceTest().main()
| test/functional/mempool_accept.py | 16,241 | Wrapper to check result of testmempoolaccept on node_0's mempool
Test mempool acceptance of raw transactions.
!/usr/bin/env python3 Copyright (c) 2017-2020 The Mantle Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Must not change mempool state Pick a random coin(base) to spend RBF is used later Pick a random coin(base) to spend SEQUENCE_FINAL Can be anything Double the fee Now, opt out of RBF Send the transaction that replaces the mempool transaction and opts out of replaceability take original raw_tx_0 Set more fee skip re-signing the tx skip re-signing the tx Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them Now see if we can add the coins back to the utxo set by sending the exact txs again Reference tx should be valid on itself Skip re-signing the transaction for context independent checks from now on tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(tx.serialize().hex())['hex']))) The following two validations prevent overflow of the output amounts (see CVE-2010-5139). Pick the input of the first tx we signed, so it has to be a coinbase tx A version currently non-standard Some non-standard script Some bare multisig script (2-of-3) Some not-pushonly scriptSig Some too large scriptSig (>1650 bytes) Use enough outputs to make the tx too large for our policy Make output smaller, such that it is dust for our policy Should be non-max, so locktime is not ignored We could include it in the second block mined from now, but not the very next one Can skip re-signing the tx because of early rejection | 1,793 | en | 0.832814 |
#!/usr/bin/env python
import os
import sys
import argparse
import subprocess
import glob
import math
from EMAN2 import *
def file_base(movie):
# return the filename and basename, exclude '.p3'
return movie, os.path.basename(os.path.splitext(movie)[0]).replace('.p3', '')
def check(log,c_p):
with open(log) as log_r:
lines = [line for line in log_r]
x0 = 0
y0 = 0
f = c_p['throw']
bad = []
while len(lines) > 0:
line1 = lines.pop(0)
if "...... Frame (" in line1:
line = line1.strip().split()
x = float(line[-2])
y = float(line[-1])
if math.sqrt((x - x0)**2 + (y - y0)**2) * c_p['apixr'] > c_p['target']:
bad += [f]
f += 1
x0 = x
y0 = y
return bad
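# For example, with the default real pixel size (0.6575 A/pixel) and a 5 A
# target, any frame-to-frame drift larger than 5 / 0.6575 ~= 7.6 pixels marks
# that frame as bad.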
def run_motioncor2(movie, c_p):
movie, basename = file_base(movie)
# generate the com file
out = basename+'_throw{:03}'.format(c_p['throw'])
o_com = out + '.com'
o_log = out + '.log'
o_mrc = out + '.mrc'
common = 'motioncor2 -InMrc {} -OutMrc {} -Iter 10 -Bft 100 -FtBin {} -Throw {} -FmRef -1 -Tilt {} {}'.format(movie,o_mrc,c_p['bin'],c_p['throw'],c_p['tilt'], c_p['gainref'])
with open(o_com, 'w') as o_com_w:
if c_p['local'] == 0:
o_com_w.write('{} -Patch 0 0'.format(common))
else:
o_com_w.write('{} -Patch {} {} -LogFile {} -FmDose {} -PixSize {} -kV {}'.format(common,c_p['patch'],c_p['patch'],out+'_',c_p['dose'],c_p['apixr'],c_p['voltage']))
# run the com
with open(o_log, 'w') as write_log:
subprocess.call(['sh', o_com], stdout=write_log, stderr=subprocess.STDOUT)
# check the shifts
bad = check(o_log,c_p)
# decide bad
decide(movie, bad, c_p)
def decide(movie, bad, c_p):
if bad == []:
if c_p['local'] == 0:
print "No bad frames. Do local now."
c_p['local'] = 1
run_motioncor2(movie, c_p)
else:
print "No bad frames. Local done for {}. Throwed the first {} frames.".format(movie, c_p['throw'])
elif max(bad) < c_p['maxthrow']:
c_p['throw'] = max(bad)
print "Throw the first {} frames.".format(c_p['throw']), "Bad frames: ", bad
run_motioncor2(movie, c_p)
else: # if too many bad frames
print '{} has too many bad frames: '.format(movie), bad
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <movies>
Output unfiltered and filtered sum using MotionCor2.
Automatically discard bad frames.
Needs:
'motioncor2' command (v1, Zheng et al., 2017)
'EMAN2' python module (v2.11, Tang et al., 2007)
"""
args_def = {'apix':1.315, 'apixr':0.6575, 'bin':1, 'patch':5, 'voltage':300, 'time':200, 'rate':7, 'target':5, 'tilt':'0 0', 'gainref':''}
parser = argparse.ArgumentParser()
parser.add_argument("movie", nargs='*', help="specify movies (mrc, mrcs, dm4) to be processed")
parser.add_argument("-a", "--apix", type=float, help="specify counting apix, by default {}".format(args_def['apix']))
parser.add_argument("-ar", "--apixr", type=float, help="specify real apix of input movie, by default {}".format(args_def['apixr']))
parser.add_argument("-b", "--bin", type=float, help="specify binning factor, by default {}".format(args_def['bin']))
parser.add_argument("-p", "--patch", type=int, help="specify the patch, by default {}".format(args_def['patch']))
parser.add_argument("-v", "--voltage", type=int, help="specify the voltage (kV), by default {}".format(args_def['voltage']))
parser.add_argument("-t", "--time", type=float, help="specify exposure time per frame in ms, by default {}".format(args_def['time']))
parser.add_argument("-r", "--rate", type=float, help="specify dose rate in e/pix/s (counting pixel, not superresolution), by default {}".format(args_def['rate']))
parser.add_argument("-ta", "--target", type=float, help="specify the target resolution, by default {}".format(args_def['target']))
parser.add_argument("-ti", "--tilt", type=str, help="specify the tilt, by default {}".format(args_def['tilt']))
parser.add_argument("-g", "--gainref", type=str, help="specify the gainref option, by default {}. e.g., '-Gain ../14sep05c_raw_196/norm-amibox05-0.mrc -RotGain 0 -FlipGain 1'".format(args_def['gainref']))
args = parser.parse_args()
if len(sys.argv) == 1:
print "usage: " + usage
print "Please run '" + progname + " -h' for detailed options."
sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# get common parameters
dose = args.time/1000.0 * args.rate / args.apix ** 2
voltage = args.voltage
c_p = {'dose':dose, 'apix':args.apix, 'apixr':args.apixr, 'bin':args.bin, 'patch':args.patch, 'voltage':voltage, 'target':args.target, 'tilt':args.tilt, 'throw':0, 'gainref':args.gainref}
# loop over all the input movies
for movie in args.movie:
if movie[-3:] == '.gz':
subprocess.call(['gunzip', movie])
movie = movie[:-3]
basename = os.path.basename(os.path.splitext(movie)[0])
suffix = os.path.basename(os.path.splitext(movie)[1])
basename_raw = basename
# unify mrc and mrcs to mrcs format
m = basename+'.p3.mrcs'
if suffix in ['.mrc','.mrcs']:
os.symlink(movie, m)
movie, basename = file_base(m)
# get nimg
c_p['nimg'] = EMUtil.get_image_count(movie)
# convert dm4 to mrcs
if suffix == '.dm4':
            for i in range(c_p['nimg']):
d=EMData(movie, i)
d.write_image(m, i)
movie, basename = file_base(m)
# here we assume 36e is the maximal dose that still contributes to visualization of protein side chains, and a total of 20e is the minimum to ensure good alignment. therefore, you can throw the first 16e at most.
c_p['maxthrow'] = min(16/dose, c_p['nimg'] - 20/dose)
# motioncor2
c_p['local'] = 0 #0 means no local, only global
c_p['throw'] = 0
run_motioncor2(movie, c_p)
# delete intermediate files, they contain '.p3.'
for i in glob.glob(basename_raw + '*.p3.*'):
os.unlink(i)
if __name__ == '__main__':
main()
| bin/p3motioncor2.py | 5,794 | !/usr/bin/env python return the filename and basename, exclude '.p3' generate the com file run the com check the shifts decide bad if too many bad frames get default values get common parameters loop over all the input movies unify mrc and mrcs to mrcs format get nimg convert dm4 to mrcs here we assume 36e is the maximal dose that still contributes to visualization of protein side chains, and a total of 20e is the minimum to ensure good alignment. therefore, you can throw the first 16e at most. motioncor20 means no local, only global delete intermediate files, they contain '.p3.' | 586 | en | 0.721719 |
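The frame-budget comment in p3motioncor2.py above (36e of useful dose, a 20e minimum for alignment, so at most the first 16e can be thrown) is easier to follow with concrete numbers. A minimal worked sketch of the same arithmetic; every acquisition value here is hypothetical:

# Hypothetical acquisition parameters (not from any real data set).
time_ms, rate, apix = 200.0, 7.0, 1.315     # ms per frame, e/pix/s, counting apix
dose = time_ms / 1000.0 * rate / apix ** 2  # dose per frame, ~0.81 e/A^2
nimg = 50                                   # hypothetical frame count
# Throw at most the first 16e, and keep at least 20e for alignment.
maxthrow = min(16 / dose, nimg - 20 / dose)
print(round(dose, 3), round(maxthrow, 1))   # 0.81 19.8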
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor
from mo.front.kaldi.utils import read_learning_info
from mo.graph.graph import Node
class AffineComponentFrontExtractor(FrontExtractorOp):
op = 'affinecomponent'
enabled = True
@staticmethod
def extract(node: Node):
read_learning_info(node.parameters)
return FixedAffineComponentFrontExtractor.extract(node)
| model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py | 1,072 | Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. | 562 | en | 0.864985 |
# import unittest
import pytest
# from ci_testing_python.app.identidock import app
if __name__ == '__main__':
# unittest.main()
pytest.main()
| tests/contract/test_contract_identidock.py | 147 | import unittest from ci_testing_python.app.identidock import app unittest.main() | 80 | en | 0.172913 |
import numpy as np
import matplotlib.pyplot as plt
import time
import csv
import os
import scipy.io as mat4py
import logging
logger = logging.getLogger("logger")
class ResultBuffer(object):
def __init__(self, log_path, episode_types):
self.log_path = log_path
self.current_episode = None
self.episodes = {e_type: list() for e_type in episode_types}
self.average_reward = 0.0
self.initial_reward = 0.0
self.average_reward_counter = 0
self.n_cluster = 0
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='w') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.title_csv())
def update_episode(self, **kwargs):
if self.current_episode is None:
raise ValueError("There is no initiated episodes object")
self.current_episode.add(**kwargs)
def add_episode(self, episode_type, lr, noise_std, buffer_size):
if episode_type in self.episodes.keys():
idx = len(self.episodes[episode_type])
episode_name = "{}_{:03d}".format(episode_type,idx)
self.episodes[episode_type].append(Episode(episode_name, lr, noise_std, buffer_size, self.average_reward))
self.current_episode = self.episodes[episode_type][-1]
else:
raise ValueError("Invalid episode type added to result buffer")
def finalize_episode(self, update_average_reward=None):
self.current_episode.summarize()
if update_average_reward is not None:
new_average = self.current_episode.final_stats['online_rewards']
if np.abs(new_average-self.initial_reward) > 0.05:
self.initial_reward = new_average
self.average_reward_counter = 0
self.average_reward = (self.average_reward_counter * self.average_reward + new_average) / (self.average_reward_counter + 1)
self.average_reward_counter += 1
logger.info(self.current_episode)
self.write_all()
def write_all(self):
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='a') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, episode in enumerate(self.episodes[episode_type]):
if episode is not None:
if "eval" in episode.name:
try:
episode.save(self.log_path)
except:
logger.info("Saving state evolution failed")
writer.writerow(episode.csv())
self.episodes[episode_type][i] = None
@staticmethod
def title():
text = list()
text.append('{:^20}'.format('Epi'))
text.append('{:^10}'.format('time'))
text.append('{:^9}'.format('lr'))
text.append('{:^9}'.format('noise'))
text.append('{:^12}'.format('buffer size'))
text.append('{:^9}'.format('#of updates'))
text.append('{:^20}'.format('average_reward'))
text.append('{:^20}'.format('actor grad norm'))
text.append('{:^20}'.format('critic grad norm'))
text.append('{:^9}'.format('q_loss'))
text.append('{:^6}'.format('rewards'))
return " | ".join(text)
@staticmethod
def title_csv():
text = list()
text.append('{}'.format('Epi'))
text.append('{}'.format('time'))
text.append('{}'.format('lr'))
text.append('{}'.format('noise'))
text.append('{}'.format('buffer size'))
text.append('{}'.format('#of updates'))
text.append('{}'.format('average_reward'))
text.append('{}'.format('actor grad norm'))
text.append('{}'.format('critic grad norm'))
text.append('{}'.format('q_loss'))
text.append('{}'.format('rewards'))
return text
class Episode(object):
def __init__(self, name, lr, noise_std, buffer_size, average_reward):
# general stats
self.name = name
self.average_reward = average_reward
self.lr = lr
self.noise_std = noise_std
self.buffer_size = buffer_size
self.total_time = time.time()
# training stats
self.stats = dict()
self.final_stats = dict()
def add(self, **kwargs):
for key,val in kwargs.items():
if key not in self.stats.keys():
self.stats[key] = list()
self.stats[key].append(val)
def summarize(self):
# updates counter
if 'global_step_critic' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_critic']
# average rewards
if 'online_rewards' in self.stats.keys():
self.stats['online_rewards'] = np.array(self.stats['online_rewards'])
self.stats['online_rewards'] = np.reshape(self.stats['online_rewards'], [self.stats['online_rewards'].shape[1], -1])
self.final_stats['online_rewards'] = np.mean(self.stats['online_rewards'][:,10:])
# value function error
if 'q_loss' in self.stats.keys():
self.final_stats['q_loss'] = np.mean(self.stats['q_loss'])
# state/action/disturbance evolution
if 'states' in self.stats.keys():
self.final_stats['states'] = np.transpose(np.squeeze(np.array(self.stats['states'])))
if 'actions' in self.stats.keys():
self.final_stats['actions'] = np.swapaxes(np.array(self.stats['actions']), 0, 1)
if 'disturbance' in self.stats.keys():
self.final_stats['disturbance'] = np.transpose(np.array(self.stats['disturbance']))
# gradient stats
if 'g_norm_critic' in self.stats.keys():
self.final_stats['g_norm_critic'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.min(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.max(np.squeeze(np.array(self.stats['g_norm_critic']))))
if 'g_norm_actor' in self.stats.keys():
self.final_stats['g_norm_actor'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.min(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.max(np.squeeze(np.array(self.stats['g_norm_actor']))))
if 'global_step_actor' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_actor'][-1]
self.total_time = time.time() - self.total_time
del self.stats
def save(self, path):
mat4py.savemat(os.path.join(path, "states", 'states_evol.mat'), {'states': self.final_stats['states']})
mat4py.savemat(os.path.join(path, "states", 'actions_evol.mat'), {'actions': self.final_stats['actions']})
mat4py.savemat(os.path.join(path, "states", 'outputs_evol.mat'), {'disturbance': self.final_stats['disturbance']})
def csv(self):
text = list()
text.append('{}'.format(self.name))
text.append('{:.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:.2e}'.format(self.lr))
text.append('{:.2e}'.format(self.noise_std))
text.append('{}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
text.append('{}'.format(self.final_stats['g_norm_actor']))
text.append('{}'.format(self.final_stats['g_norm_critic']))
text.append('{:.2e}'.format(self.final_stats['q_loss']))
text.append('{:.5f}'.format(self.final_stats['online_rewards']))
return text
def __repr__(self):
text = list()
text.append('{:^20}'.format(self.name))
text.append('{:^10.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:^9.2e}'.format(self.lr))
text.append('{:^9.2e}'.format(self.noise_std))
text.append('{:^d}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
            mea, mi, ma = self.final_stats['g_norm_actor']  # stored as (mean, min, max) by summarize()
            text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mea, mi, ma))
            mea, mi, ma = self.final_stats['g_norm_critic']
            text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mea, mi, ma))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if "pol" in self.name:
            mea, mi, ma = self.final_stats['g_norm_critic']  # (mean, min, max)
            text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mea, mi, ma))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if len(self.final_stats.keys()) > 0 :
text.append('{:^6.5f}'.format(self.final_stats['online_rewards']))
return " | ".join(text)
class Figure(object):
def __init__(self, name, log_path, y_data, x_data=None, options = None, labels = None):
self.fig = plt.figure()
self.fig.set_size_inches(18.5, 10.5)
for y in y_data:
plt.plot(x_data, y)
plt.legend(labels)
plt.title(" ".join(name.split("_")))
self.fig.savefig(os.path.join(log_path, "plots", name))
plt.close()
| result_buffer.py | 9,788 | general stats training stats updates counter average rewards value function error state/action/disturbance evolution gradient stats | 131 | en | 0.613093 |
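A minimal usage sketch for the ResultBuffer/Episode pair above. It writes a train.csv into the current directory; every number is hypothetical, and the stats keys supplied are the ones summarize() and csv() expect for a non-eval episode:

buf = ResultBuffer(log_path='.', episode_types=['train'])
buf.add_episode('train', lr=1e-3, noise_std=0.1, buffer_size=10000)
for step in range(3):
    buf.update_episode(online_rewards=[[1.0] * 20], q_loss=0.5,
                       g_norm_actor=[0.1], g_norm_critic=[0.2],
                       global_step_actor=step)
buf.finalize_episode(update_average_reward=True)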
"""Import a file from Illumina BaseSpace."""
import atexit
import gzip
import os
import time
import traceback
from pathlib import Path
from requests import RequestException, Session
from resolwe.process import (
BooleanField,
FileField,
GroupField,
IntegerField,
Persistence,
Process,
SecretField,
StringField,
)
class BaseSpaceDownloadError(Exception):
"""BaseSpace download error."""
pass
def download_file_repeatedly(
tries, session, file_id, file_name, expected_file_size, request_headers, error
):
"""Attempt to download BaseSpace file numerous times in case of errors."""
for i in range(tries):
try:
download_file(
session=session,
file_id=file_id,
file_name=file_name,
request_headers=request_headers,
error=error,
)
raise_for_file_corruption(
file_name=file_name, expected_file_size=expected_file_size, error=error
)
break
except BaseSpaceDownloadError:
if i + 1 == tries:
error("Could not download file from BaseSpace.")
else:
time.sleep(3)
def download_file(session, file_id, file_name, request_headers, error):
"""Download BaseSpace file."""
response = make_get_request(
session=session,
url=get_api_file_content_url(file_id=file_id),
headers=request_headers,
error=error,
stream=True,
)
try:
with open(file_name, "wb") as f:
chunk_size = 1024 * 1024 * 10
for chunk in response.iter_content(chunk_size=chunk_size):
f.write(chunk)
except FileNotFoundError:
error(f"Could not save file to {file_name}, due to directory not being found")
except PermissionError:
error(f"Could not save file to {file_name}, due to insufficient permissions")
except RequestException:
error(f"Could not save file to {file_name}, due to a network error")
def get_file_properties(session, file_id, request_headers, error):
"""Get file name and size (in bytes)."""
response = make_get_request(
session=session,
url=get_api_file_url(file_id=file_id),
headers=request_headers,
error=error,
)
info = response.json()["Response"]
return info["Name"], info["Size"]
def make_get_request(session, url, headers, error, stream=False):
"""Make a get request."""
response = session.get(url=url, headers=headers, stream=stream, timeout=60)
if response.status_code == 401:
error(f"Authentication failed on URL {url}")
elif response.status_code == 404:
error(f"BaseSpace file {url} not found")
elif response.status_code != 200:
error(f"Failed to retrieve content from {url}")
return response
def get_api_file_url(file_id):
"""Get BaseSpace API file URL."""
api_url = "https://api.basespace.illumina.com/v1pre3"
return f"{api_url}/files/{file_id}"
def get_api_file_content_url(file_id):
"""Get BaseSpace API file contents URL."""
return f"{get_api_file_url(file_id=file_id)}/content"
def output(output_option, value):
"""Print to standard output."""
if output_option == "full":
print(value)
elif output_option == "filename":
if value.startswith("filename="):
print(value[len("filename=") :])
def get_token_from_secret_file(secret_file_path, error):
"""Read secret file to obtain access token."""
try:
with open(secret_file_path, "r") as f:
return f.readline()
except FileNotFoundError:
error("Secret file not found")
except PermissionError:
error("No permissions to read secret file")
def on_exit(session):
"""Clean up function called on exit."""
session.close()
def raise_for_file_corruption(file_name, expected_file_size, error):
"""Raise an error if file does not pass integrity check."""
# Check file size.
actual_file_size = os.path.getsize(file_name)
if expected_file_size != actual_file_size:
error(
f"File's ({file_name}) expected size ({expected_file_size}) "
f"does not match its actual size ({actual_file_size})"
)
# Check gzip integrity.
if file_name.split(".")[-1] == "gz":
try:
with gzip.open(file_name, "rb") as f:
chunk_size = 1024 * 1024 * 10
while bool(f.read(chunk_size)):
pass
except OSError:
error(f"File {file_name} did not pass gzip integrity check")
class BaseSpaceImport(Process):
"""Import a file from Illumina BaseSpace."""
slug = "basespace-file-import"
name = "BaseSpace file"
process_type = "data:file"
version = "1.4.0"
category = "Import"
data_name = 'BaseSpace ({{ file_id|default("?") }})'
persistence = Persistence.TEMP
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
},
"resources": {
"cores": 1,
"memory": 1024,
"network": True,
"secrets": True,
},
}
class Input:
"""Input fields to process BaseSpaceImport."""
file_id = StringField(label="BaseSpace file ID")
access_token_secret = SecretField(
label="BaseSpace access token",
description="BaseSpace access token secret handle needed to download the file.",
)
show_advanced = BooleanField(
label="Show advanced options",
default=False,
)
class Advanced:
"""Advanced options."""
output = StringField(
label="Output",
allow_custom_choice=False,
choices=[("full", "Full"), ("filename", "Filename")],
default="filename",
description="Sets what is printed to standard output. "
"Argument 'Full' outputs everything, "
"argument 'Filename' outputs only file names of downloaded files.",
)
tries = IntegerField(
label="Tries",
description="Number of tries to download a file before giving up.",
range=[1, 10],
default=3,
)
verbose = BooleanField(
label="Verbose",
default=False,
description="Print detailed exception information to standard output "
"when error occurs. Output argument had no effect on this argument.",
)
advanced = GroupField(
Advanced, label="Advanced options", hidden="!show_advanced"
)
class Output:
"""Output fields to process BaseSpaceImport."""
file = FileField(label="File with reads")
def run(self, inputs, outputs):
"""Run import."""
secret_path = Path("/secrets") / inputs.access_token_secret["handle"]
session = Session()
atexit.register(on_exit, session)
try:
file_id = inputs.file_id
access_token = get_token_from_secret_file(
secret_file_path=secret_path, error=self.error
)
headers = {"x-access-token": access_token}
file_name, file_size = get_file_properties(
session=session,
file_id=file_id,
request_headers=headers,
error=self.error,
)
download_file_repeatedly(
tries=inputs.advanced.tries,
session=session,
file_id=file_id,
file_name=file_name,
expected_file_size=file_size,
request_headers=headers,
error=self.error,
)
output(inputs.advanced.output, f"filename={file_name}")
except Exception as error:
if inputs.advanced.verbose:
traceback.print_exc()
self.error(
"Unexpected error occurred while trying to download files from BaseSpace. "
"Check standard output for more details."
)
else:
print(str(error))
self.error(
"Unexpected error occurred while trying to download files from BaseSpace. "
"Set Verbose to True to see the traceback."
)
outputs.file = file_name
| resolwe_bio/processes/import_data/basespace.py | 8,660 | Advanced options.
BaseSpace download error.
Import a file from Illumina BaseSpace.
Input fields to process BaseSpaceImport.
Output fields to process BaseSpaceImport.
Download BaseSpace file.
Attempt to download BaseSpace file numerous times in case of errors.
Get BaseSpace API file contents URL.
Get BaseSpace API file URL.
Get file name and size (in bytes).
Read secret file to obtain access token.
Make a get request.
Clean up function called on exit.
Print to standard output.
Raise an error if file does not pass integrity check.
Run import.
Import a file from Illumina BaseSpace.
Check file size. Check gzip integrity. | 626 | en | 0.719908 |
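download_file_repeatedly above implements a common bounded-retry pattern; the same idea in isolation, with a hypothetical action and a fixed back-off:

import time

def retry(action, tries=3, delay=3):
    # Try `action` up to `tries` times, sleeping between failures;
    # re-raise the last error once the budget is exhausted.
    for i in range(tries):
        try:
            return action()
        except Exception:
            if i + 1 == tries:
                raise
            time.sleep(delay)

print(retry(lambda: 42))  # succeeds on the first attempt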
from kafka import KafkaProducer
from kafka import KafkaConsumer
from kafka import KafkaAdminClient
import json
from json import dumps
from json import loads
import time
import os
import requests
import sys
import GE_GSCH_low_define as lowDefine
'''
{'requestID': 'req-f6720a0e-e3df-455a-825d-f8c80cedc2d9',
'date': '2021-10-18 13:46:30', 'status': 'create',
'fileID': 'b469e54a-721f-4c55-b43e-d09088556031', 'failCnt': 0,
'env': {
'type': 'global',
'targetClusters': ['c1', ['c2', 'c3'], 'c4'],
'priority': 'GLowLatencyPriority',
'option': {
'sourceCluster': 'c1',
'sourceNode': 'a-worker-node01'
}
}
}
'''
class GLowLatencyPriority_Job:
def __init__(self,request_data_dic):
self.job_name = lowDefine.SELF_POLICY_NAME
self.requestDataDic = request_data_dic
self.requestID=request_data_dic['requestID']
self.fileID=request_data_dic['fileID']
self.failCnt=request_data_dic['failCnt']
self.env=request_data_dic['env']
self.targetClusters=self.env['targetClusters']
self.sourceCluster=self.env['option']['sourceCluster']
self.sourceNode=self.env['option']['sourceNode']
self.sharedClusters = self.get_shared_clusters()
self.producer= KafkaProducer(acks=0,
compression_type='gzip',
bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
value_serializer=lambda x: dumps(x).encode('utf-8'))
def get_shared_clusters(self):
for item in self.targetClusters :
            if type(item).__name__ == 'list' :
if len(item) > 1 :
return item
else :
return None
else :
print()
#apply low-latency yaml with
def check_res_fail(self, res):
if res == None:
return True
if 'hcode' not in res:
return True
if 'lcode' not in res:
return True
if 'msg' not in res:
return True
if 'result' not in res['msg']:
return True
return False
def request_clusters_latency_from_clusterAgent(self,clusters):
try :
temp_msg = {'source':{'type':'none'},
'target':{'type':'cluster', 'object':self.sourceCluster},
'hcode':200,
'lcode':1,
'msg':{'requestID': self.requestID,'sourceNode': self.sourceNode,'targetClusters': clusters }
}
self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,value=temp_msg)
self.producer.flush()
except:
return 'process_fail'
return 'process_success'
def wait_request_clusters_latency_from_clusterAgent(self):
ordered_cluster_list =[]
res = self.wait_consumer()
if res == None:
print('res is None')
return 'process_fail', ordered_cluster_list
is_process_fail = self.check_res_fail(res)
hcode = res['hcode']
lcode = res['lcode']
result = res['msg']['result']
'''
result: [ {cluster: c3, latency: 11 },
{cluster: c2, latency: 34 } ]
'''
if is_process_fail:
print('Fail Job:', res)
return 'process_fail', ordered_cluster_list
else:
if hcode == 200 and lcode == 2:
for item in result :
ordered_cluster_list.append(item['cluster'])
return 'process_success', ordered_cluster_list
else :
return 'process_fail', ordered_cluster_list
def apply_yaml_to_ClusterAgent(self,cluster):
print('apply_yaml_to_ClusterAgent:',cluster)
try :
temp_msg = {'source':{'type':'none'},
'target':{'type':'cluster', 'object':cluster},
'hcode':210,
'lcode':1,
'msg':{'requestID': self.requestID,'fileID':self.fileID,'requestData':self.requestDataDic }
}
self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,value=temp_msg)
self.producer.flush()
except:
return 'process_fail'
return 'process_success'
def wait_apply_yaml_to_ClusterAgent(self):
res = self.wait_consumer()
if res == None:
print('res is None')
return 'process_fail'
is_process_fail = self.check_res_fail(res)
hcode = res['hcode']
lcode = res['lcode']
result = res['msg']['result']
        print('hcode, lcode, result:', hcode, lcode, result)
if is_process_fail:
print('Fail Job:', res)
return 'process_fail'
else:
if hcode == 210 and lcode == 2:
if result == 'success' :
return 'apply_success'
elif result == 'fail' :
return 'apply_fail'
elif result == 'cancel' :
return 'cancel'
else :
return 'process_fail'
else:
return 'process_fail'
def wait_consumer(self):
print('wait_consumer')
consumer = KafkaConsumer(
self.requestID,
bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=self.requestID,
value_deserializer=lambda x: loads(x.decode('utf-8')),
consumer_timeout_ms=1000*10
)
print('w-1')
res = None
for message in consumer:
print("Topic: %s, Partition: %d, Offset: %d, Key: %s, Value: %s" % ( message.topic, message.partition, message.offset, message.key, message.value ))
res = message.value
break
consumer.close()
return res
def start_job_processor():
print('start_job_processor')
while 1 :
#read dispatched queue
print('1')
try :
res = requests.get(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/policys/'+lowDefine.SELF_POLICY_NAME)
except:
print('wait front server to run',lowDefine.FRONT_SERVER_SERVER_URL)
time.sleep(5)
continue
if res.status_code == 200 :
print('2')
request_data_dic = json.loads(res.json())
print('request_data_dic',request_data_dic)
GE_Request_Job = GLowLatencyPriority_Job(request_data_dic)
print('3')
#send topic message
'''
            return values
            'apply_success' : apply succeeded
            'process_success' : processing succeeded
            'process_fail' : an error was raised during processing (apply, wait consumer, or latency request)
            'apply_fail' : apply failed
'''
is_whole_process_status = None
for item in GE_Request_Job.targetClusters :
print('type(item)',type(item),item)
if type(item).__name__ == 'list' and len(item) > 1 :
r = GE_Request_Job.request_clusters_latency_from_clusterAgent(item)
if r == 'process_fail' :
print('internal error : request_clusters_latency_from_clusterAgent')
continue
r,clusters = GE_Request_Job.wait_request_clusters_latency_from_clusterAgent()
if r == 'process_fail' :
print('internal error : wait_request_clusters_latency_from_clusterAgent')
continue
for t_cluster in clusters:
r = GE_Request_Job.apply_yaml_to_ClusterAgent(t_cluster)
if r == 'process_fail' :
print('internal error : apply_yaml_to_ClusterAgent')
continue
r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
if r == 'process_fail' :
print('internal error : wait_apply_yaml_to_ClusterAgent')
continue
elif r == 'apply_success' or r == 'cancel':
                            print('---apply_success or cancel', r)
is_whole_process_status = r
break
elif r == 'apply_fail' :
is_whole_process_status = r
continue
if r == 'apply_success' or r == 'cancel':
break
else :
r = GE_Request_Job.apply_yaml_to_ClusterAgent(item)
if r == 'process_fail' :
print('internal error : apply_yaml_to_ClusterAgent')
continue
r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
if r == 'process_fail' :
print('internal error : wait_apply_yaml_to_ClusterAgent')
continue
elif r == 'apply_success' or r == 'cancel':
is_whole_process_status = r
print('apply_success or cancel:',r)
break
elif r == 'apply_fail':
is_whole_process_status = r
print('apply_fail')
continue
print('==============')
if is_whole_process_status == 'apply_fail' :
#GE_Request_Job.requestDataDic['status'] = 'failed'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/failed')
elif is_whole_process_status == 'apply_success' :
#GE_Request_Job.requestDataDic['status'] = 'completed'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/completed')
elif is_whole_process_status == 'cancel' :
#GE_Request_Job.requestDataDic['status'] = 'cancel'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/canceled')
else :
#GE_Request_Job.requestDataDic['status'] = 'cancel'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/canceled')
else:
            print('dispatched queue is empty')
time.sleep(5)
continue
#time.sleep(1)
if __name__ == '__main__':
start_job_processor()
| gs-scheduler/global_scheduler2/policy_dockerfile/lowlatency/GE_GSCH_low_latency.py | 10,937 | apply low-latency yaml with read dispatched queuesend topic message GE_Request_Job.requestDataDic['status'] = 'failed'GE_Request_Job.requestDataDic['status'] = 'completed'GE_Request_Job.requestDataDic['status'] = 'cancel'GE_Request_Job.requestDataDic['status'] = 'cancel'time.sleep(1) | 284 | en | 0.537287 |
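wait_request_clusters_latency_from_clusterAgent above appends clusters in the order the agent returned them, i.e. it assumes the agent pre-sorted the payload by latency. A defensive sketch that sorts explicitly (the payload below is hypothetical):

result = [{'cluster': 'c2', 'latency': 34}, {'cluster': 'c3', 'latency': 11}]
ordered = [item['cluster'] for item in sorted(result, key=lambda r: r['latency'])]
print(ordered)  # ['c3', 'c2']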
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'ListTopicKeysResult',
'AwaitableListTopicKeysResult',
'list_topic_keys',
]
@pulumi.output_type
class ListTopicKeysResult:
"""
Namespace/ServiceBus Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the authorization rule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListTopicKeysResult(ListTopicKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListTopicKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def list_topic_keys(authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
topic_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicKeysResult:
"""
Namespace/ServiceBus Connection String
API Version: 2017-04-01.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name.
"""
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value
return AwaitableListTopicKeysResult(
alias_primary_connection_string=__ret__.alias_primary_connection_string,
alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
key_name=__ret__.key_name,
primary_connection_string=__ret__.primary_connection_string,
primary_key=__ret__.primary_key,
secondary_connection_string=__ret__.secondary_connection_string,
secondary_key=__ret__.secondary_key)
| sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | 6,588 | Namespace/ServiceBus Connection String
Primary connection string of the alias if GEO DR is enabled
Secondary connection string of the alias if GEO DR is enabled
A string that describes the authorization rule.
Namespace/ServiceBus Connection String
API Version: 2017-04-01.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name.
Primary connection string of the created namespace authorization rule.
A base64-encoded 256-bit primary key for signing and validating the SAS token.
Secondary connection string of the created namespace authorization rule.
A base64-encoded 256-bit primary key for signing and validating the SAS token.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test | 1,017 | en | 0.70987 |
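A minimal usage sketch for list_topic_keys above. It only runs inside a Pulumi program with Azure credentials configured, and every resource name below is hypothetical:

keys = list_topic_keys(
    authorization_rule_name='RootManageSharedAccessKey',
    namespace_name='my-namespace',
    resource_group_name='my-resource-group',
    topic_name='my-topic')
pulumi.export('primary_connection_string', keys.primary_connection_string)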
"""
Base backend
Trace and Database classes from the other modules should subclass the base
classes.
"""
import PyMC2
class Trace(object):
"""Dummy Trace class.
"""
def __init__(self,value=None, obj=None):
"""Assign an initial value and an internal PyMC object."""
self._trace = value
if obj is not None:
if isinstance(obj, PyMC2.PyMCBase):
self._obj = obj
else:
                raise AttributeError('Not PyMC object: %r' % (obj,))
def _initialize(self, length):
"""Dummy method. Subclass if necessary."""
pass
def tally(self, index):
"""Dummy method. Subclass if necessary."""
pass
def truncate(self, index):
"""Dummy method. Subclass if necessary."""
pass
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Dummy method. Subclass if necessary.
Input:
- burn (int): The number of transient steps to skip.
- thin (int): Keep one in thin.
- chain (int): The index of the chain to fetch. If None, return all chains.
        - slicing: A slice, overriding burn and thin assignment.
"""
        raise AttributeError(self._obj.__name__ + " has no trace")
__call__ = gettrace
## def obj():
## def fset(self, obj):
## if isinstance(obj, PyMC2.PyMCBase):
## self.__obj = obj
## else:
## raise AttributeError, 'Not PyMC object'
## def fget(self):
## return self.__obj
## return locals()
## obj = property(**obj())
def _finalize(self):
pass
class Database(object):
"""Dummy Database backend"""
def __init__(self):
"""Get the Trace from the local scope."""
self.Trace = Trace
def _initialize(self, length):
"""Tell the traces to initialize themselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._initialize(length)
def tally(self, index):
"""Dummy method. Subclass if necessary."""
for o in self.model._pymc_objects_to_tally:
o.trace.tally(index)
def connect(self, sampler):
"""Link the Database to the Sampler instance.
If database is loaded from a file, restore the objects trace
to their stored value, if a new database is created, instantiate
a Trace for the PyMC objects to tally.
"""
if isinstance(sampler, PyMC2.Sampler):
self.model = sampler
else:
            raise AttributeError('Not a Sampler instance.')
if hasattr(self, '_state_'):
# Restore the state of the Sampler.
for o in sampler._pymc_objects_to_tally:
o.trace = getattr(self, o.__name__)
o.trace._obj = o
else:
# Set a fresh new state
for o in sampler._pymc_objects_to_tally:
o.trace = self.Trace(obj=o)
for o in sampler._pymc_objects_to_tally:
o.trace.db = self
def _finalize(self):
"""Tell the traces to finalize themselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._finalize()
def close(self):
"""Close the database."""
pass
def savestate(self, state):
"""Store a dictionnary containing the state of the Sampler and its
SamplingMethods."""
self._state_ = state
def getstate(self):
"""Return a dictionary containing the state of the Sampler and its
SamplingMethods."""
return self._state_
| PyMC2/database/base.py | 3,752 | def obj(): def fset(self, obj): if isinstance(obj, PyMC2.PyMCBase): self.__obj = obj else: raise AttributeError, 'Not PyMC object' def fget(self): return self.__obj return locals() obj = property(**obj()) Restore the state of the Sampler. Set a fresh new state | 346 | en | 0.395065 |
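A minimal sketch of the subclassing contract described in the base module above: a backend that tallies every value into a plain Python list. This is an illustration only, not one of PyMC2's real backends:

class ListTrace(Trace):
    def _initialize(self, length):
        self._trace = []
    def tally(self, index):
        self._trace.append(self._obj.value)
    def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
        if slicing is not None:
            return self._trace[slicing]
        return self._trace[burn::thin]

class ListDatabase(Database):
    def __init__(self):
        self.Trace = ListTrace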
from utils import TreeNode, binary_tree
class Solution:
def __init__(self):
        self.index = 0  # using [number of inorder elements to the left = size of the left subtree] would make this counter field unnecessary
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
if not preorder:
return None
def build_node(lo, hi):
node = TreeNode(preorder[self.index])
self.index += 1
            j = inorder.index(node.val, lo, hi)  # some solutions build a dict to speed up this step, at the cost of extra space
if self.index < len(preorder) and preorder[self.index] in inorder[lo:j]:
node.left = build_node(lo, j)
if self.index < len(preorder) and preorder[self.index] in inorder[j + 1:hi]:
node.right = build_node(j + 1, hi)
return node
return build_node(0, len(preorder))
if __name__ == '__main__':
x = Solution().buildTree([1, 2, 4, 6, 5, 7, 8, 3, 9], [4, 6, 2, 7, 5, 8, 1, 9, 3])
x = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
| medium/Q105_ConstructBinaryTreeFromPreorderAndInorderTraversal.py | 1,016 | :type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
using [number of inorder elements to the left = size of the left subtree] would make this counter field unnecessary; some solutions build a dict to speed up this step, at the cost of extra space | 129 | zh | 0.848514
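The translated comment above notes that building a value-to-index dict trades memory for speed; a sketch of that variant for the same problem (O(n) time, O(n) extra space, assuming the same TreeNode from utils and unique node values):

def build_tree_fast(preorder, inorder):
    pos = {v: i for i, v in enumerate(inorder)}  # value -> inorder index
    it = iter(preorder)
    def build(lo, hi):
        if lo >= hi:
            return None
        node = TreeNode(next(it))  # next preorder value is the subtree root
        j = pos[node.val]
        node.left = build(lo, j)
        node.right = build(j + 1, hi)
        return node
    return build(0, len(inorder))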
#
# Copyright (c) 2019, 2021 by Delphix. All rights reserved.
#
import dlpx.virtualization.api
from dlpx.virtualization.common.util import to_str
def get_virtualization_api_version():
"""Returns the Virutalization API version string.
:return: version string
"""
return to_str(dlpx.virtualization.api.__version__)
| platform/src/main/python/dlpx/virtualization/platform/util.py | 332 | Returns the Virutalization API version string.
:return: version string
Copyright (c) 2019, 2021 by Delphix. All rights reserved. | 131 | en | 0.759711 |
# -*- coding: utf-8 -*-
from ..base import Property
from .array import StateVector
from .base import Type
class Particle(Type):
"""
Particle type
A particle type which contains a state and weight
"""
state_vector: StateVector = Property(doc="State vector")
weight: float = Property(doc='Weight of particle')
parent: 'Particle' = Property(default=None, doc='Parent particle')
def __init__(self, state_vector, weight, parent=None, *args, **kwargs):
if parent:
parent.parent = None
if state_vector is not None and not isinstance(state_vector, StateVector):
state_vector = StateVector(state_vector)
super().__init__(state_vector, weight, parent, *args, **kwargs)
@property
def ndim(self):
return self.state_vector.shape[0]
| stonesoup/types/particle.py | 821 | Particle type
A particle type which contains a state and weight
-*- coding: utf-8 -*- | 88 | en | 0.901478 |
from heapq import heappush, nsmallest
import numpy as np
class NearestNeighbor():
def __init__(self, embeddings, encodings, config):
self.embeddings = embeddings
self.encodings = encodings
self.config = config
def euclidian_distance(self, e1, e2):
'''
https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy
'''
return np.linalg.norm(e1 - e2)
def get_embedding(self, word):
if self.encodings.word_in_vocab(word):
return self.embeddings[word]
        return self.embeddings[self.config.unknown_word]
def nearest_neighbors(self, word, count=1):
embedding = self.get_embedding(word)
heap = []
# TODO: is it faster to not have the the string comparision and instead always
# remove the first element of the array which will have a distance of 0
# TODO: implement faster solution than the heap where it only keeps track of K
# values which should vastly reduce the number of operations required.
for w in self.embeddings:
if w == word:
continue
dist = self.euclidian_distance(embedding, self.embeddings[w])
heappush(heap, (dist, w))
return nsmallest(count, heap)
| Word2Vec/NearestNeighbor.py | 1,325 | https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy
TODO: is it faster to not have the the string comparision and instead always remove the first element of the array which will have a distance of 0 TODO: implement faster solution than the heap where it only keeps track of K values which should vastly reduce the number of operations required. | 406 | en | 0.860146 |
# Copyright (c) 2013, igrekus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from dc_plc.custom.utils import add_completeness, add_query_relevance
from dc_plc.controllers.stats_query import get_procmap_stats
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [
"ID:Link/DC_PLC_Product_Summary",
_("Relevance"),
_("Progress"),
_("RnD Title"),
_("Function"),
_("External number"),
_("Process map"),
_("Internal number")
]
def get_data(filters):
res = get_procmap_stats(filters)
has_perms = 'DC_PLC_Process_Map_Specialist' in frappe.get_roles(frappe.session.user)
res = [add_completeness(row, [4]) for row in res]
res = [add_query_relevance(row, has_perms) for row in res]
return res
| dc_plc/dc_plc/report/dc_product_procmap_stats/dc_product_procmap_stats.py | 886 | Copyright (c) 2013, igrekus and contributors For license information, please see license.txt | 92 | en | 0.674615 |
# module for distance computation;
import numpy as np
def dist(arraya, arrayb, mode):
if mode == 0:
dis = np.sum(np.abs(np.subtract(arraya, arrayb)))
elif mode == 1:
dis = np.sqrt(np.sum(np.power(np.subtract(arraya, arrayb), 2)))
else:
dis = 1 - np.dot(arraya, arrayb) / np.sqrt(np.sum(np.power(arraya, 2)) * np.sum(np.power(arrayb, 2)))
return dis
def corr(arraya, arrayb, show):
a = np.subtract(arraya, np.mean(arraya))
b = np.subtract(arrayb, np.mean(arrayb))
corr = np.sum(np.multiply(a, b)) / np.sqrt(np.multiply(np.sum(np.power(a, 2)), np.sum(np.power(b, 2))))
return corr | DCS311 Artificial Intelligence/KNN/lab1_code/M3/dist.py | 638 | module for distance computation; | 32 | en | 0.449301 |
"""Run CVEjob."""
import sys
from decimal import Decimal
import multiprocessing
import nvdlib
from nvdlib.manager import FeedManager
from nvdlib.query_selectors import in_range
from cvejob.filters.input import validate_cve
from cvejob.config import Config
from cvejob.identifiers import get_identifier_cls
from cvejob.cpe2pkg import get_pkgfile_path, PackageNameCandidate
from cvejob.selectors.basic import VersionSelector
from cvejob.outputs.victims import VictimsYamlOutput
from cvejob.versions import NVDVersions
from cvejob.utils import parse_date_range
import logging
# logging configuration
logging.basicConfig(level=logging.DEBUG,
handlers=[nvdlib.get_logging_handler()]) # use nvdlib's handler
logger = logging.getLogger('cvejob')
FEED_NAME_PATTERN = r"nvdcve-" \
r"(?P<version>[\d.]+)-" \
r"(?P<name>(?P<name_string>(([A-Za-z]+)))|(?P<name_year>([\d]+)))" \
r".json"
def _log_results(victims_output):
"""Log results."""
cve_id = victims_output.cve.id_
logger.info(
"[{cve_id}] picked `{winner}` out of `{candidates}`".format(
cve_id=cve_id,
winner=victims_output.winner,
candidates=victims_output.candidates
))
logger.info(
"[{cve_id}] Affected version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.affected_versions
))
logger.info(
"[{cve_id}] Safe version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.safe_versions
))
def _filter_collection(collection, date_range, cherry_pick):
"""Filter Document collection."""
if date_range:
collection_size_before = collection.count()
collection = collection.find(
{'published_date': in_range(*date_range)}
)
logger.debug(("Filtered out {} Documents that do not fall "
"in the given range.").format(
collection_size_before - collection.count()
))
if cherry_pick:
logger.debug("Cherry-picked CVE `{cve_id}`".format(
cve_id=cherry_pick
))
collection = collection.find(
{'cve.id_': cherry_pick}
)
return collection
def run():
"""Run CVEjob."""
feed_dir = Config.feed_dir
feed_names = Config.feed_names
date_range = Config.date_range
cherrypicked_cve_id = Config.cve_id
cherrypicked_year = None
if cherrypicked_cve_id:
cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]
if int(cherrypicked_year) < 2002:
# all CVEs prior to 2002 are stored in 2002 feed
cherrypicked_year = 2002
if date_range:
date_range = parse_date_range(Config.date_range)
feed_names = range(date_range[0].year, date_range[1].year + 1)
if cherrypicked_cve_id: # optimization check
if int(cherrypicked_year) not in feed_names:
logger.info(
"[{picked_cve_id}] does not belong to the given feed range:"
" {date_range}".format(
picked_cve_id=cherrypicked_cve_id,
date_range=date_range
))
return
# prune the feed names as it is not necessary to iterate over all of them
feed_names = [cherrypicked_year]
if not feed_names:
if cherrypicked_cve_id:
feed_names = [cherrypicked_year]
else:
feed_names = ['modified']
with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:
feeds = feed_manager.fetch_feeds(
feed_names=feed_names, data_dir=feed_dir, update=True
)
collection = feed_manager.collect(feeds)
collection = _filter_collection(collection,
date_range,
cherrypicked_cve_id)
if not collection: # collection is empty
        logger.info("Collection is empty.")
return
logger.debug("Number of CVE Documents in the collection: {}".format(
collection.count()
))
if Config.package_name and Config.cve_id:
# user knows the package name, so we don't have to guess ;)
doc = [x for x in collection][0] # Collection doesn't support indexing
affected, safe = NVDVersions(doc, Config.package_name, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=PackageNameCandidate(Config.package_name, Decimal('1.0')),
candidates=[],
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
sys.exit(0)
for doc in collection:
cve_id = doc.cve.id_
try:
if not validate_cve(doc):
logger.debug(
"[{cve_id}] was filtered out by input checks".format(
cve_id=cve_id
))
continue
pkgfile_path = get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)
identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)
candidates = identifier.identify()
if not candidates:
logger.info(
"[{cve_id}] no package name candidates found".format(
cve_id=cve_id
))
continue
selector = VersionSelector(doc, candidates, Config.ecosystem)
winner = selector.pick_winner()
if not winner:
logger.info(
"[{cve_id}] no package name found".format(
cve_id=cve_id
))
continue
affected, safe = NVDVersions(doc, winner.package, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=winner,
candidates=candidates,
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
except Exception as exc:
logger.warning(
"[{cve_id}] Unexpected exception occurred: {exc}".format(
cve_id=cve_id,
exc=exc
), exc_info=True)
if __name__ == '__main__':
run()
| run.py | 6,764 | Filter Document collection.
Log results.
Run CVEjob.
Run CVEjob.
logging configuration use nvdlib's handler all CVEs prior to 2002 are stored in 2002 feed optimization check prune the feed names as it is not necessary to iterate over all of them collection is empty user knows the package name, so we don't have to guess ;) Collection doesn't support indexing | 361 | en | 0.908416 |
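A small sketch of what FEED_NAME_PATTERN in run.py above matches; the file names follow the standard NVD feed naming scheme:

import re

m = re.match(FEED_NAME_PATTERN, 'nvdcve-1.0-2019.json')
print(m.group('version'), m.group('name'))  # 1.0 2019
m = re.match(FEED_NAME_PATTERN, 'nvdcve-1.0-modified.json')
print(m.group('name'))  # modified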
# This file is generated by C:\projects\numpy-wheels\numpy\setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
blas_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
lapack_mkl_info={}
lapack_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
atlas_3_10_blas_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
atlas_3_10_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
openblas_info={}
blas_mkl_info={}
openblas_lapack_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| python-3.4.4.amd64/Lib/site-packages/numpy/__config__.py | 1,798 | This file is generated by C:\projects\numpy-wheels\numpy\setup.py It contains system_info results at the time of building this package. | 135 | en | 0.918881 |
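The two entry points in numpy's generated __config__.py above can be exercised directly; get_info accepts either the short or the full key name:

show()                       # prints every *_info dict in this module
print(get_info('blas_opt'))  # same as get_info('blas_opt_info')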
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import functools
import logging
from os import path
import boto3
import jsonschema
from c7n_mailer import deploy, utils
from c7n_mailer.azure_mailer.azure_queue_processor import MailerAzureQueueProcessor
from c7n_mailer.azure_mailer import deploy as azure_deploy
from c7n_mailer.sqs_queue_processor import MailerSqsQueueProcessor
from c7n_mailer.utils import get_provider, Providers
from ruamel import yaml
AZURE_KV_SECRET_SCHEMA = {
'type': 'object',
'properties': {
'type': {'enum': ['azure.keyvault']},
'secret': {'type': 'string'}
},
'required': ['type', 'secret'],
'additionalProperties': False
}
SECURED_STRING_SCHEMA = {
'oneOf': [
{'type': 'string'},
AZURE_KV_SECRET_SCHEMA
]
}
CONFIG_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['queue_url'],
'properties': {
'queue_url': {'type': 'string'},
'from_address': {'type': 'string'},
'contact_tags': {'type': 'array', 'items': {'type': 'string'}},
'org_domain': {'type': 'string'},
# Standard Lambda Function Config
'region': {'type': 'string'},
'role': {'type': 'string'},
'runtime': {'type': 'string'},
'memory': {'type': 'integer'},
'timeout': {'type': 'integer'},
'subnets': {'type': 'array', 'items': {'type': 'string'}},
'security_groups': {'type': 'array', 'items': {'type': 'string'}},
'dead_letter_config': {'type': 'object'},
'lambda_name': {'type': 'string'},
'lambda_description': {'type': 'string'},
'lambda_tags': {'type': 'object'},
'lambda_schedule': {'type': 'string'},
# Azure Function Config
'function_properties': {
'type': 'object',
'appInsights': {
'type': 'object',
'oneOf': [
{'type': 'string'},
{'type': 'object',
'properties': {
'name': 'string',
'location': 'string',
'resourceGroupName': 'string'}
}
]
},
'storageAccount': {
'type': 'object',
'oneOf': [
{'type': 'string'},
{'type': 'object',
'properties': {
'name': 'string',
'location': 'string',
'resourceGroupName': 'string'}
}
]
},
'servicePlan': {
'type': 'object',
'oneOf': [
{'type': 'string'},
{'type': 'object',
'properties': {
'name': 'string',
'location': 'string',
'resourceGroupName': 'string',
'skuTier': 'string',
'skuName': 'string'}
}
]
},
},
'function_schedule': {'type': 'string'},
'function_skuCode': {'type': 'string'},
'function_sku': {'type': 'string'},
# Mailer Infrastructure Config
'cache_engine': {'type': 'string'},
'smtp_server': {'type': 'string'},
'smtp_port': {'type': 'integer'},
'smtp_ssl': {'type': 'boolean'},
'smtp_username': {'type': 'string'},
'smtp_password': SECURED_STRING_SCHEMA,
'ldap_email_key': {'type': 'string'},
'ldap_uid_tags': {'type': 'array', 'items': {'type': 'string'}},
'debug': {'type': 'boolean'},
'ldap_uid_regex': {'type': 'string'},
'ldap_uri': {'type': 'string'},
'ldap_bind_dn': {'type': 'string'},
'ldap_bind_user': {'type': 'string'},
'ldap_uid_attribute': {'type': 'string'},
'ldap_manager_attribute': {'type': 'string'},
'ldap_email_attribute': {'type': 'string'},
'ldap_bind_password_in_kms': {'type': 'boolean'},
'ldap_bind_password': {'type': 'string'},
'cross_accounts': {'type': 'object'},
'ses_region': {'type': 'string'},
'redis_host': {'type': 'string'},
'redis_port': {'type': 'integer'},
'datadog_api_key': {'type': 'string'}, # TODO: encrypt with KMS?
'datadog_application_key': {'type': 'string'}, # TODO: encrypt with KMS?
'slack_token': {'type': 'string'},
'slack_webhook': {'type': 'string'},
'sendgrid_api_key': SECURED_STRING_SCHEMA,
'splunk_hec_url': {'type': 'string'},
'splunk_hec_token': {'type': 'string'},
'splunk_remove_paths': {
'type': 'array',
'items': {'type': 'string'}
},
'splunk_actions_list': {'type': 'boolean'},
'splunk_max_attempts': {'type': 'integer'},
'splunk_hec_max_length': {'type': 'integer'},
# SDK Config
'profile': {'type': 'string'},
'http_proxy': {'type': 'string'},
'https_proxy': {'type': 'string'},
# Mapping account / emails
'account_emails': {'type': 'object'}
}
}
def session_factory(mailer_config):
return boto3.Session(
region_name=mailer_config['region'],
profile_name=mailer_config.get('profile', None))
def get_logger(debug=False):
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
logging.getLogger('botocore').setLevel(logging.WARNING)
if debug:
logging.getLogger('botocore').setLevel(logging.DEBUG)
debug_logger = logging.getLogger('custodian-mailer')
debug_logger.setLevel(logging.DEBUG)
return debug_logger
else:
return logging.getLogger('custodian-mailer')
def get_and_validate_mailer_config(args):
with open(args.config) as fh:
config = yaml.load(fh.read(), Loader=yaml.SafeLoader)
jsonschema.validate(config, CONFIG_SCHEMA)
utils.setup_defaults(config)
return config
def get_c7n_mailer_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', required=True, help='mailer.yml config file')
debug_help_msg = 'sets c7n_mailer logger to debug, for maximum output (the default is INFO)'
parser.add_argument('--debug', action='store_true', help=debug_help_msg)
max_num_processes_help_msg = 'will run the mailer in parallel, integer of max processes allowed'
parser.add_argument('--max-num-processes', type=int, help=max_num_processes_help_msg)
templates_folder_help_msg = 'message templates folder location'
parser.add_argument('-t', '--templates', help=templates_folder_help_msg)
group = parser.add_mutually_exclusive_group(required=True)
update_lambda_help_msg = 'packages your c7n_mailer, uploads the zip to aws lambda as a function'
group.add_argument('--update-lambda', action='store_true', help=update_lambda_help_msg)
run_help_msg = 'run c7n-mailer locally, process sqs messages and send emails or sns messages'
group.add_argument('--run', action='store_true', help=run_help_msg)
return parser
def run_mailer_in_parallel(processor, max_num_processes):
max_num_processes = int(max_num_processes)
    if max_num_processes < 1:
        raise ValueError('--max-num-processes must be a positive integer')
processor.max_num_processes = max_num_processes
processor.run(parallel=True)
def main():
parser = get_c7n_mailer_parser()
args = parser.parse_args()
mailer_config = get_and_validate_mailer_config(args)
args_dict = vars(args)
logger = get_logger(debug=args_dict.get('debug', False))
module_dir = path.dirname(path.abspath(__file__))
default_templates = [path.abspath(path.join(module_dir, 'msg-templates')),
path.abspath(path.join(module_dir, '..', 'msg-templates')),
path.abspath('.')]
templates = args_dict.get('templates', None)
if templates:
default_templates.append(path.abspath(path.expanduser(path.expandvars(templates))))
mailer_config['templates_folders'] = default_templates
provider = get_provider(mailer_config)
if args_dict.get('update_lambda'):
if args_dict.get('debug'):
print('\n** --debug is only supported with --run, not --update-lambda **\n')
return
if args_dict.get('max_num_processes'):
print('\n** --max-num-processes is only supported '
'with --run, not --update-lambda **\n')
return
if provider == Providers.Azure:
azure_deploy.provision(mailer_config)
elif provider == Providers.AWS:
deploy.provision(mailer_config, functools.partial(session_factory, mailer_config))
if args_dict.get('run'):
max_num_processes = args_dict.get('max_num_processes')
# Select correct processor
if provider == Providers.Azure:
processor = MailerAzureQueueProcessor(mailer_config, logger)
elif provider == Providers.AWS:
aws_session = session_factory(mailer_config)
processor = MailerSqsQueueProcessor(mailer_config, aws_session, logger)
# Execute
if max_num_processes:
run_mailer_in_parallel(processor, max_num_processes)
else:
processor.run()
if __name__ == '__main__':
main()
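# A hedged usage sketch (hypothetical file names; the parser above defines the
# real flags, and --update-lambda / --run are mutually exclusive):
#
#   c7n-mailer --config mailer.yml --update-lambda
#   c7n-mailer --config mailer.yml --run --max-num-processes 4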
| tools/c7n_mailer/c7n_mailer/cli.py | 9,607 | Standard Lambda Function Config Azure Function Config Mailer Infrastructure Config TODO: encrypt with KMS? TODO: encrypt with KMS? SDK Config Mapping account / emails Select correct processor Execute | 199 | en | 0.521463 |
from sklearn.exceptions import NotFittedError
class MockFunction:
"""
Mock utility function for testing.
"""
def __init__(self, return_val):
self.return_val = return_val
def __call__(self, *args):
return self.return_val
class MockEstimator:
"""
Mock classifier object for testing.
"""
def __init__(
self, predict_proba_return=None, predict_return=None, score_return=None,
classes_=None, fitted=True
):
self.fitted = fitted
if fitted:
self.classes_ = classes_
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockActiveLearner:
"""
Mock ActiveLearner for testing.
"""
def __init__(
self, predictor=None, query_strategy=None,
predict_proba_return=None, calculate_utility_return=None, predict_return=None, score_return=None,
_X_initial=None, _y_initial=None
):
self.estimator = predictor
self.query_strategy = query_strategy
self.predict_proba_return = predict_proba_return
self.calculate_utility_return = calculate_utility_return
self.predict_return = predict_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
return self.predict_return
def predict_proba(self, *args, **kwargs):
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockCommittee:
"""
Mock Committee for testing.
"""
def __init__(
self, n_learners=1, classes_=None, fitted=True,
calculate_disagreement_return=None,
predict_return=None, predict_proba_return=None,
vote_return=None, vote_proba_return=None
):
self.fitted = fitted
self.n_learners = n_learners
if fitted:
self.classes_ = classes_
else:
self.classes_ = None
self.calculate_disagreement_return = calculate_disagreement_return
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.vote_return = vote_return
self.vote_proba_return = vote_proba_return
def __len__(self):
return self.n_learners
def __iter__(self):
for x in range(self.n_learners):
yield x
def _calculate_disagreement(self, *args, **kwargs):
return self.calculate_disagreement_return
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def vote(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_return
def vote_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_proba_return
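# A minimal sketch of how these test doubles are typically wired into a test
# (hypothetical values; not part of the original module):
#
#   import numpy as np
#   est = MockEstimator(predict_return=np.array([0, 1]), classes_=np.array([0, 1]))
#   assert (est.predict(np.zeros((2, 3))) == np.array([0, 1])).all()
#   est_unfitted = MockEstimator(fitted=False)
#   # est_unfitted.predict(...) raises NotFittedError, mirroring the sklearn API.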
| tests/mock.py | 3,583 | Mock ActiveLearner for testing. Mock Committee for testing. Mock classifier object for testing. Mock utility function for testing. | 130 | en | 0.866248 |
#!/usr/bin/env python
from itertools import izip
import xmlrpclib
import rospy
from rospy.rostime import Time, Duration
from flexbe_core import EventState as Dummy
from flexbe_core import Logger
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached, ProxyActionClient
from sensor_msgs.msg import JointState
from sweetie_bot_control_msgs.msg import SetOperationalAction, SetOperationalGoal, SetOperationalResult
# This is a helper class, so we trick the FlexBe App into ignoring it:
# Dummy is actually EventState, but the FlexBe App cannot recognize it as such.
class SetJointStateBase(Dummy):
'''
    Base class for states which move the robot to a named pose using the FollowJointState controller.
    The pose is loaded from a binary parameter on the Parameter Server as a JointState message.
    The state then activates the FollowJointState controller and publishes the pose.
    Movement is considered finished when the position error is less than the given tolerance.
-- controller string FollowJointState controller namespace.
-- tolerance float Position tolerance (rad).
-- timeout float Movement timeout (s).
    -- joint_topic  string  Topic where the actual pose is published.
<= done Finished.
<= failed Failed to activate FollowJointState controller.
<= timeout Timeout reached.
'''
def __init__(self, controller = 'motion/controller/joint_state_head', tolerance = 0.17, timeout = 10.0,
joint_topic = "joint_states", outcomes = ['done', 'failed', 'timeout']):
super(SetJointStateBase, self).__init__(outcomes = outcomes)
# Store topic parameter for later use.
self._controller = controller
self._joint_topic = joint_topic
self._tolerance = tolerance
self._timeout = Duration.from_sec(timeout)
# create proxies
self._action_client = ProxyActionClient({self._controller: SetOperationalAction})
self._pose_publisher = ProxyPublisher({ self._controller + '/in_joints_ref': JointState })
self._pose_subscriber = ProxySubscriberCached({ self._joint_topic: JointState })
# timestamp
self._timestamp = None
# error in enter hook
self._error = False
def load_joint_state_msg(self, pose_ns, pose_param):
# derive parameter full name
if pose_ns:
pose_param = pose_ns + '/' + pose_param
# Load JointState message from Parameter Server
try:
goal_raw = rospy.get_param(pose_param)
except KeyError as e:
raise KeyError, "SetJointStateBase: Unable to get '" + pose_param + "' parameter."
if not isinstance(goal_raw, xmlrpclib.Binary):
raise TypeError, "SetJointStateBase: ROS parameter '" + pose_param + "' is not a binary data."
# deserialize
self._target_joint_state = JointState()
self._target_joint_state.deserialize(goal_raw.data)
# create joint index to simplify tolerance check
self._joint_target_pose = { name: position for name, position in izip(self._target_joint_state.name, self._target_joint_state.position) }
def on_enter(self, userdata):
self._error = False
# activate controller
        activation_request = SetOperationalGoal()
        activation_request.operational = True
        activation_request.resources = self._target_joint_state.name
        try:
            self._action_client.send_goal(self._controller, activation_request)
except Exception as e:
Logger.logwarn('SetJointStateBase: Failed to send the SetOperational command:\n%s' % str(e))
self._error = True
return
# set start timestamp
self._timestamp = Time.now()
def execute(self, userdata):
# error in start hook
if self._error:
return 'failed'
# check if controller is active
if not self._action_client.is_active(self._controller):
Logger.loginfo('SetJointStateBase: controller was deactivated by external cause.')
            return 'failed'
        # check if timeout has elapsed
        if Time.now() - self._timestamp > self._timeout:
            Logger.loginfo('SetJointStateBase: timeout reached.')
            return 'timeout'
# publish goal pose
self._pose_publisher.publish(self._controller+'/in_joints_ref', self._target_joint_state)
# check tolerance
joints_msg = self._pose_subscriber.get_last_msg(self._joint_topic)
on_position = True
for name, pos in izip(joints_msg.name, joints_msg.position):
target_pos = self._joint_target_pose.get(name)
            if target_pos is not None:
if abs(target_pos - pos) > self._tolerance:
on_position = False
break
if on_position:
Logger.loginfo('SetJointStateBase: on position')
return 'done'
def on_exit(self, userdata):
if self._action_client.is_active(self._controller):
try:
self._action_client.cancel(self._controller)
except Exception as e:
Logger.logwarn('SetJointStateBase: failed to deactivate `' + self._controller + '` controller:\n%s' % str(e))
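# A minimal concrete state built on this base (a sketch; the pose parameter
# name 'saved_msgs/head_nominal' is hypothetical):
#
#   class SetHeadNominalPose(SetJointStateBase):
#       def __init__(self):
#           super(SetHeadNominalPose, self).__init__()
#           self.load_joint_state_msg('saved_msgs', 'head_nominal')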
| sweetie_bot_flexbe_states/src/sweetie_bot_flexbe_states/internal/set_joint_state_base.py | 5,411 | !/usr/bin/env python This is helper class so trick FlexBe App to ignore it. Dummy is actually EventState but FlexBe App is not able to recognize it. Store topic parameter for later use. create proxies timestamp error in enter hook derive parameter full name Load JointState message from Parameter Server deserialize create joint index to simplify tolerance check activate controller set start timestamp error in start hook check if controller is active check if time elasped publish goal pose check tolerance | 510 | en | 0.636998 |
import logging
import asyncio
from steam.ext.csgo import Client
from steam.ext.csgo.enums import Language
from steam.ext.csgo.backpack import BaseInspectedItem
from steam.protobufs import GCMsgProto, EMsg, MsgProto
from steam.protobufs.client_server import CMsgClientLicenseListLicense
from steam_tradeoffer_manager.base import SteamBot, SteamBotPool
_log = logging.getLogger(__name__)
# https://steamdb.info/app/730/subs/
_CSGO_PACKAGE_IDS = {
17039,
88535,
54029,
161243,
261665,
14,
211096,
133828,
4,
49,
16236,
16237,
17878,
18702,
18703,
18939,
27267,
29197,
29198,
36071,
39221,
39297,
51835,
51836,
53711,
59228,
62690,
88534,
88541,
88623,
88624,
61,
392171,
61986,
329385,
303386,
63290,
15740,
298963,
298962,
298961,
272766,
199420,
154735,
277644,
273865,
266388,
229740,
226979,
16222,
16223,
16018,
16019,
54030,
63289,
197847,
4116,
11470,
11758,
15990,
17905,
27618,
27762,
35043,
54627,
60765,
62486,
62606,
62688,
113904,
124041,
125313,
}
_CSGO_ID = 730
class InspectBot(SteamBot[int, "InspectPool"], Client):
_licenses: dict[int, CMsgClientLicenseListLicense]
async def on_ready(self) -> None:
await super().on_ready()
await asyncio.sleep(0.1) # ensure licenses event was emitted
for package_id in _CSGO_PACKAGE_IDS:
if package_id in self.licenses:
break
else:
# TODO: errors requesting free license
_log.info(f"Request free CSGO license for {self}")
await self.request_free_license([_CSGO_ID]) # request CSGO license
self.pool.queue.put_nowait(self)
@property
def licenses(self) -> dict[int, CMsgClientLicenseListLicense]:
return getattr(self, "_licenses", {})
async def on_licenses(self, licenses: list[CMsgClientLicenseListLicense]):
self._licenses = {}
for steam_license in licenses:
self.licenses[steam_license.package_id] = steam_license
def timeout(self) -> asyncio.Task:
async def _timeout():
await asyncio.sleep(1)
self.pool.queue.put_nowait(self)
return asyncio.create_task(_timeout())
def request_free_license(self, app_ids: list[int]): # pragma: no cover
return self.ws.send_proto_and_wait(MsgProto(EMsg.ClientRequestFreeLicense, appids=app_ids))
async def inspect_item(self, s: int, a: int, d: int, m: int, timeout: int) -> BaseInspectedItem: # pragma: no cover
await self.ws.send_gc_message(
GCMsgProto(
Language.Client2GCEconPreviewDataBlockRequest,
param_s=s,
param_a=a,
param_d=d,
param_m=m,
)
)
return await self.wait_for("inspect_item_info", timeout=timeout, check=lambda item: item.id == a)
class InspectPool(SteamBotPool[int, InspectBot]):
INSPECT_TIMEOUT: int
def __init__(self) -> None:
super().__init__()
self.queue: asyncio.Queue[InspectBot] = asyncio.Queue()
async def startup(self) -> None:
await super().startup()
        # wait until the first bot is ready, then hand it straight back to the queue
bot = await self.queue.get()
self.queue.put_nowait(bot)
async def inspect_item(self, s: int, a: int, d: int, m: int) -> BaseInspectedItem:
bot = await self.queue.get()
try:
item = await bot.inspect_item(s, a, d, m, self.INSPECT_TIMEOUT)
finally:
bot.timeout()
return item
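# A hedged sketch of the consumer side (assumes the surrounding application has
# already added ready bots to the pool; the s/a/d/m values are placeholders
# parsed from a CS:GO inspect link, and INSPECT_TIMEOUT is deployment config):
#
#   pool.INSPECT_TIMEOUT = 5
#   await pool.startup()
#   item = await pool.inspect_item(s=..., a=..., d=..., m=...)
#   print(item.id)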
| app/services/pool/pool.py | 3,769 | https://steamdb.info/app/730/subs/ ensure licenses event was emitted TODO: errors requesting free license request CSGO license pragma: no cover pragma: no cover waiting for first bot is ready and then return | 207 | en | 0.78233 |
import numpy as np
class Reward:
pass
class StaticReward(Reward):
def __init__(self, value):
self.value = value
    def get(self):
        return self.value
class NormalReward(Reward):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def get(self):
return np.random.normal(self.mean, self.std)
class Bandit:
def __init__(self, arms):
self.no_of_arms = arms
self.arms = [np.random.normal(0, 1) for _ in range(arms)]
def step(self, arm):
return np.random.normal(self.arms[arm], 1)
class MDP:
"""
Represents a Markov Decision Process.
"""
def __init__(self, S, A, R, p):
"""
Parameters
----------
S : int
Number of states
A : matrix
A[s][a] is True iff a is permitted in s
R : list
A list of reward generators
p : matrix
p[s][a][s'] = p(s'|s,a)
"""
self.S = list(range(S))
self.A, self.R, self.p = A, R, p
self.no_of_states = S
self.no_of_actions = len(A[0])
def step(self, s, a):
"""Given a state and an action, returns a new state and a reward.
Parameters
----------
s : int
Current state
a : int
Action to take
"""
s_prime = np.random.choice(self.no_of_states, p = self.p[s][a])
r = self.R[s_prime].get()
return s_prime, r
def epsilon_greedy(no_of_arms, epsilon, Q, N):
if np.random.random() > epsilon:
# greedy
action = np.argmax(Q)
else:
# random
action = np.random.choice(no_of_arms)
return action
def main():
no_of_arms = 10
no_of_steps = 1000
epsilon = 0.1
no_of_runs = 2000
#bandit = Bandit(no_of_arms)
arms = np.random.normal(0, 1, no_of_arms)
S = 1
A = [[True] * no_of_arms]
R = [NormalReward(m, 1) for m in arms]
p = [[[1] for _ in range(no_of_arms)]]
bandit = MDP(S, A, R, p)
#optimal_action = np.argmax(bandit.arms)
optimal_action = np.argmax(arms)
np.random.seed(1)
    # one independent estimate table and count table per run (rows must not alias)
    Q = [[0] * no_of_arms for _ in range(no_of_runs)]
    N = [[0] * no_of_arms for _ in range(no_of_runs)]
mean_rewards = [0] * no_of_steps
for j in range(no_of_steps):
for i in range(no_of_runs):
action = epsilon_greedy(no_of_arms, epsilon, Q[i], N[i])
#reward = bandit.step(action)
_, reward = bandit.step(0, action)
mean_rewards[j] += reward
N[i][action] += 1
Q[i][action] += (1 / N[i][action]) * (reward - Q[i][action])
mean_rewards[j] /= no_of_runs
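    # Note: the Q update in the inner loop is the standard incremental sample
    # average, Q_{n+1} = Q_n + (1/n) * (R_n - Q_n), so no reward history is kept.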
if __name__ == '__main__':
    main()
| main.py | 2,299 | Represents a Markov Decision Process. Parameters ---------- S : int Number of states A : matrix A[s][a] is True iff a is permitted in s R : list A list of reward generators p : matrix p[s][a][s'] = p(s'|s,a) Given a state and an action, returns a new state and a reward. Parameters ---------- s : int Current state a : int Action to take greedy random bandit = Bandit(no_of_arms) optimal_action = np.argmax(bandit.arms) reward = bandit.step(action) | 496 | en | 0.588514 |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_2 import models
class Username(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'username': 'str'
}
attribute_map = {
'username': 'username'
}
required_args = {
}
def __init__(
self,
username=None, # type: str
):
"""
Keyword args:
username (str): The username of the user.
"""
if username is not None:
self.username = username
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Username, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Username):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
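# A quick sketch of the generated model in use (hypothetical value):
#
#   u = Username(username='pureuser')
#   assert u.to_dict() == {'username': 'pureuser'}
#   u.bogus = 1  # raises KeyError: only keys in attribute_map are settable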
| pypureclient/flasharray/FA_2_2/models/username.py | 3,095 | Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. Returns true if both objects are equal Keyword args: username (str): The username of the user. Returns true if both objects are not equal For `print` and `pprint` Returns the model properties as a dict Returns the string representation of the model FlashArray REST API No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: 2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git coding: utf-8 type: str | 728 | en | 0.678813 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=39
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
from cirq.contrib.svg import SVGCircuit
# Build the benchmark circuit (the gate sequence below is auto-generated).
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[2])) # number=18
c.append(cirq.Z.on(input_qubit[3])) # number=28
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=21
c.append(cirq.H.on(input_qubit[3])) # number=22
c.append(cirq.X.on(input_qubit[3])) # number=13
c.append(cirq.H.on(input_qubit[3])) # number=23
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=24
c.append(cirq.H.on(input_qubit[3])) # number=25
c.append(cirq.H.on(input_qubit[0])) # number=33
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=34
c.append(cirq.H.on(input_qubit[0])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.Y.on(input_qubit[1])) # number=26
c.append(cirq.Y.on(input_qubit[1])) # number=27
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=29
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=30
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=36
c.append(cirq.X.on(input_qubit[0])) # number=37
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=38
c.append(cirq.X.on(input_qubit[0])) # number=32
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2615.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | benchmark/startCirq2615.py | 3,107 | !/usr/bin/env python -*- coding: utf-8 -*- @Time : 5/15/20 4:49 PM @File : grover.py qubit number=4 total number=39thatsNoCode Symbols for the rotation angles in the QAOA circuit. circuit begin number=9 number=2 number=3 number=4 number=5 number=18 number=28 number=6 number=7 number=8 number=20 number=21 number=22 number=13 number=23 number=24 number=25 number=33 number=34 number=35 number=19 number=15 number=16 number=17 number=26 number=27 number=29 number=30 number=36 number=37 number=38 number=32 circuit end | 523 | en | 0.275006 |
import simulations.simulation as simulation
import simulations.simulation_runner as simrunner
import cPickle
import os
import random
import re
import string
import subprocess
import sys
from simulations.utils.optionparser import OptionParser
from nose.tools import assert_equal
from nose.tools import assert_raises
def filename_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
class Sim(simulation.Simulation):
def _run(self):
return "runs"
class Sim2(simulation.Simulation):
def _run(self):
print >> self.out, "runs"
return "runs"
class Batch(simrunner.SimulationRunner):
def _add_listeners(self):
self.on('oparser set up', self._set_options)
self.on('options parsed', self._check_options)
self.on('options parsed', self._set_data)
self.on('done', self._when_done)
@staticmethod
def _set_options(self):
self.oparser.add_option("-t", "--test", action="store_true", dest="test", default=False, help="Testing")
@staticmethod
def _check_options(self):
if not self.options.test:
self.oparser.error("Test flag not passed")
@staticmethod
def _set_data(self):
self.data['test'] = self.options.test
@staticmethod
def _when_done(self):
return "test"
class TestSimulation:
def setUp(self):
self.sim = Sim(1, 2, None)
def tearDown(self):
self.sim = None
def test_simulation_init(self):
assert self.sim is not None, "Sim is not set up"
assert_equal(self.sim.data, 1)
assert_equal(self.sim.num, 2)
assert self.sim.outfile is None, "_outfile is not None"
assert self.sim.out is None
assert_equal(self.sim.out_opened, False)
def test_simulation_set_outfile(self):
self.sim.set_output_file("/tmp/test")
assert_equal(self.sim.outfile, "/tmp/test")
assert self.sim.out is None, "Sim.out is set up"
self.sim.is_running = True
self.sim.set_output_file("/tmp/test")
assert self.sim.out is not None, "Sim.out is not set up"
simulation._close_out_fd(self.sim)
assert self.sim.out is None, "Sim.out was not closed"
assert_equal(self.sim.out_opened, False)
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
self.sim.set_output_file("/tmp/test2")
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
def test_simulation_run(self):
assert_equal(self.sim.out_opened, False)
self.sim.set_output_file(False)
result = self.sim.run()
assert_equal(self.sim.result, "runs")
assert_equal(result, "runs")
assert_equal(self.sim.out_opened, False)
assert simulation.Simulation._run(self.sim) is None
def test_delegation_method(self):
self.sim.set_output_file(None)
assert_equal(simrunner.run_simulation([Sim, 1, 2, None]), "runs")
class TestSimulationBatch:
def setUp(self):
self.dir = "/tmp/" + filename_generator(8)
self.batch = Batch(Sim2)
def tearDown(self):
self.batch = None
if os.path.isdir(self.dir):
files = os.listdir(self.dir)
for f in files:
if f == "." or f == "..": continue
if f[-8:] == ".testout":
os.remove(self.dir + os.sep + f)
os.rmdir(self.dir)
def test_batch_init(self):
assert self.batch is not None, "Batch is not set up"
assert isinstance(self.batch.oparser, OptionParser), "Option parser is not initialized"
assert self.batch.options is None, "Options is initialized"
assert self.batch.args is None, "Args is initialized"
assert_equal(self.batch.data, {})
assert_equal(self.batch._task_dup_num, False)
assert_equal(len(self.batch.identifier), 6)
assert re.match('[{0}{1}]{{6}}'.format(string.ascii_uppercase, string.digits), self.batch.identifier)
def test_handler_options(self):
sim2 = Batch(Sim2, option_error_handler=2, option_exit_handler=3)
assert_equal(sim2.oparser._errorhandler, 2)
assert_equal(sim2.oparser._exithandler, 3)
def test_batch_option_setup(self):
assert self.batch.oparser.has_option("-D"), "No -D option"
assert self.batch.oparser.has_option("--nofiledump"), "No --nofiledump option"
assert self.batch.oparser.has_option("-F"), "No -F option"
assert self.batch.oparser.has_option("--filename"), "No --filename option"
assert self.batch.oparser.has_option("-N"), "No -N option"
assert self.batch.oparser.has_option("--duplications"), "No --duplications option"
assert self.batch.oparser.has_option("-O"), "No -O option"
assert self.batch.oparser.has_option("--output"), "No --output option"
assert self.batch.oparser.has_option("-P"), "No -P option"
assert self.batch.oparser.has_option("--poolsize"), "No --poolsize option"
assert self.batch.oparser.has_option("-Q"), "No -Q option"
assert self.batch.oparser.has_option("--quiet"), "No --quiet option"
assert self.batch.oparser.has_option("-S"), "No -S option"
assert self.batch.oparser.has_option("--statsfile"), "No --statsfile option"
assert self.batch.oparser.has_option("-t"), "No -t option"
assert self.batch.oparser.has_option("--test"), "No --test option"
def test_batch_go(self):
args = ["-F", "iter_{0}.testout", "-N", "4", "-O", self.dir, "-S", "results.testout", "--test"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 4)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "iter_{0}.testout")
assert_equal(self.batch.options.file_dump, True)
assert_equal(self.batch.options.stats_file, "results.testout")
## pp stuff
#assert_equal(self.batch.options.pool_size, 'autodetect')
assert self.batch.options.pool_size is None, "Pool size is not None"
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(4):
assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
for i in range(4):
with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file:
assert_equal(dup_file.read(), "runs\n")
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(4):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go2(self):
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 2)
assert_equal(self.batch.options.quiet, True)
assert_equal(self.batch.data['test'], True)
for i in range(6):
assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go3(self):
args = ["-N", "6", "-P", "1", "-O", self.dir, "-S", "results.testout", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 1)
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(6):
assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_option_failure(self):
args = ["-N", "-6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
assert_raises(SystemExit, self.batch.go, option_values=None)
def test_option_failure2(self):
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D"]
assert_raises(SystemExit, self.batch.go, option_args=args)
def test_option_failure3(self):
args = ["-N", "6", "-P", "-1", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
## pp stuff
#class TestClustering:
#
# def setUp(self):
# self.secret = filename_generator(6)
# self.server = subprocess.Popen(["ppserver.py", "-s", self.secret])
# self.batch = Batch(Sim2)
# self.dir = "/tmp/" + filename_generator(8)
#
# def tearDown(self):
# self.batch = None
# self.server.terminate()
# if os.path.isdir(self.dir):
# files = os.listdir(self.dir)
# for f in files:
# if f == "." or f == "..": continue
# if f[-8:] == ".testout":
# os.remove(self.dir + os.sep + f)
# os.rmdir(self.dir)
#
# def test_batch_cluster_go(self):
# args = ["-F", "iter_{0}.testout", "-N", "4", "-P", "2", "-O", self.dir, "-S", "results.testout", "--test", "--cluster=127.0.0.1", "--clustersecret="+self.secret]
# assert self.batch.go(option_args=args) is None
# assert_equal(self.batch.options.test, True)
# assert_equal(self.batch.options.dup, 4)
# assert_equal(self.batch.options.output_dir, self.dir)
# assert_equal(self.batch.options.output_file, "iter_{0}.testout")
# assert_equal(self.batch.options.file_dump, True)
# assert_equal(self.batch.options.stats_file, "results.testout")
# assert_equal(self.batch.options.pool_size, 2)
# assert_equal(self.batch.options.quiet, False)
# assert_equal(self.batch.options.cluster_string, '127.0.0.1')
# assert_equal(self.batch.options.cluster_secret, self.secret)
#
# assert_equal(self.batch.data['test'], True)
#
# for i in range(4):
# assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
# assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
#
# for i in range(4):
# with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file:
# assert_equal(dup_file.read(), "runs\n")
#
# with open(self.dir + os.sep + 'results.testout', "r") as results_file:
# should_be = ''
# should_be += cPickle.dumps(self.batch.options) + "\n"
# should_be += "\n"
# for _ in range(4):
# should_be += cPickle.dumps("runs") + "\n"
# should_be += "\n"
# assert_equal(results_file.read(), should_be)
#
| test/simulation_tests.py | 13,112 | pp stuffassert_equal(self.batch.options.pool_size, 'autodetect') pp stuffclass TestClustering: def setUp(self): self.secret = filename_generator(6) self.server = subprocess.Popen(["ppserver.py", "-s", self.secret]) self.batch = Batch(Sim2) self.dir = "/tmp/" + filename_generator(8) def tearDown(self): self.batch = None self.server.terminate() if os.path.isdir(self.dir): files = os.listdir(self.dir) for f in files: if f == "." or f == "..": continue if f[-8:] == ".testout": os.remove(self.dir + os.sep + f) os.rmdir(self.dir) def test_batch_cluster_go(self): args = ["-F", "iter_{0}.testout", "-N", "4", "-P", "2", "-O", self.dir, "-S", "results.testout", "--test", "--cluster=127.0.0.1", "--clustersecret="+self.secret] assert self.batch.go(option_args=args) is None assert_equal(self.batch.options.test, True) assert_equal(self.batch.options.dup, 4) assert_equal(self.batch.options.output_dir, self.dir) assert_equal(self.batch.options.output_file, "iter_{0}.testout") assert_equal(self.batch.options.file_dump, True) assert_equal(self.batch.options.stats_file, "results.testout") assert_equal(self.batch.options.pool_size, 2) assert_equal(self.batch.options.quiet, False) assert_equal(self.batch.options.cluster_string, '127.0.0.1') assert_equal(self.batch.options.cluster_secret, self.secret) assert_equal(self.batch.data['test'], True) for i in range(4): assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1) assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing" for i in range(4): with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file: assert_equal(dup_file.read(), "runs\n") with open(self.dir + os.sep + 'results.testout', "r") as results_file: should_be = '' should_be += cPickle.dumps(self.batch.options) + "\n" should_be += "\n" for _ in range(4): should_be += cPickle.dumps("runs") + "\n" should_be += "\n" assert_equal(results_file.read(), should_be) | 2,373 | en | 0.257626 |
"""Support for Blockchain.com sensors."""
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADDRESSES): [cv.string],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Blockchain.com sensors."""
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if not validate_address(address):
_LOGGER.error("Bitcoin address is not valid: %s", address)
return False
add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(Entity):
"""Representation of a Blockchain.com sensor."""
def __init__(self, name, addresses):
"""Initialize the sensor."""
self._name = name
self.addresses = addresses
self._state = None
self._unit_of_measurement = "BTC"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest state of the sensor."""
self._state = get_balance(self.addresses)
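# The configuration.yaml stanza this platform consumes (the address value is a
# placeholder; any valid Bitcoin address works):
#
#   sensor:
#     - platform: blockchain
#       addresses:
#         - "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
#       name: Bitcoin Balance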
| homeassistant/components/blockchain/sensor.py | 2,275 | Representation of a Blockchain.com sensor. Initialize the sensor. Return the state attributes of the sensor. Return the icon to use in the frontend, if any. Return the name of the sensor. Set up the Blockchain.com sensors. Return the state of the sensor. Return the unit of measurement this sensor expresses itself in. Get the latest state of the sensor. Support for Blockchain.com sensors. | 390 | en | 0.72127 |
from typing import Any
from copy import deepcopy
class Model:
def __init__(self, name: str, model, freq: str):
self.name = name
self.model = model
self.freq = freq
self.train = None
self.test = None
self.prediction = None
self.pred_col = "prediction"
self.y_col = "y"
self.date_col = "ds"
def fit(self, train_dataset):
"Performs model training with standard settings"
self.train = deepcopy(train_dataset)
if "orbit" in self.name:
self.model.fit(self.train)
elif "nprophet" in self.name:
self.model.fit(self.train, validate_each_epoch=True,
valid_p=0.2, freq=self.freq,
plot_live_loss=True, epochs=100)
def predict(self, dataset: Any):
"Performs prediction"
self.test = deepcopy(dataset)
if "orbit" in self.name:
prediction = self.model.predict(self.test)
elif "nprophet" in self.name:
future = self.model.make_future_dataframe(self.train, periods=len(self.test))
prediction = self.model.predict(future).rename(columns={"yhat1": self.pred_col})
prediction = prediction[[self.date_col, self.pred_col]]
self.prediction = prediction
return self.prediction
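# Hypothetical usage with a NeuralProphet backend (the dispatch above keys on
# the substring "nprophet" in the name; "D" = daily frequency):
#
#   m = Model(name="nprophet-daily", model=NeuralProphet(), freq="D")
#   m.fit(train_df)             # train_df needs "ds" and "y" columns
#   preds = m.predict(test_df)  # returns columns ["ds", "prediction"]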
| interpolML/interpolML/model/model.py | 1,353 | Performs model training with standard settings Performs prediction | 66 | en | 0.939886 |
#!/usr/bin/python
# https://practice.geeksforgeeks.org/problems/knapsack-with-duplicate-items/0
def sol(n, w, wt, v):
"""
We do not need to create a 2d array here because all numbers are available
always
Try all items for weight ranging from 1 to w and check if weight
can be picked. Take the max of the result
"""
dp = [0 for i in range(w+1)]
for i in range(n):
for j in range(w+1):
if wt[i] <= j:
dp[j] = max(dp[j], v[i]+dp[j-wt[i]])
    return dp[w]
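# Quick sanity check (hypothetical instance): with duplicates allowed,
# weights [2, 3] and values [5, 7] at capacity 5 give 5 + 7 = 12.
assert sol(2, 5, [2, 3], [5, 7]) == 12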
| full-problems/knapsackWithDuplicates.py | 526 | We do not need to create a 2d array here because all numbers are available always Try all items for weight ranging from 1 to w and check if weight can be picked. Take the max of the result !/usr/bin/python https://practice.geeksforgeeks.org/problems/knapsack-with-duplicate-items/0 | 282 | en | 0.786482 |