content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang
---|---|---|---|---|---|---|---|---|
import datetime
import random
import csv
import sys
book_titles = [
'Advanced Deep Learning with Keras',
'Hands-On Machine Learning for Algorithmic Trading',
'Architects of Intelligence',
'Deep Reinforcement Learning Hands-On',
'Natural Language Processing with TensorFlow',
'Hands-On Reinforcement Learning with Python',
'Brave New World',
'The Grapes of Wrath',
'For Whom The Bell Tolls',
    'To Kill a Mockingbird',
'The Great Gatsby',
'The Catcher in the Rye',
    'Fahrenheit 451',
'Pride and Prejudice',
'1984',
'Animal Farm: A Fairy Story',
'Paul Clifford',
'The Talisman',
]
users = ['alice', 'bob', 'carol', 'david']
review_content_rating = [
('Fascinating work.', 5),
('Great Read.', 5),
('Inspiring.', 5),
    ("I can't put this one down.", 5),
('Interesting book.', 4),
('Quite good.', 4),
('Fair.', 3),
('Interesting.', 3),
('Definitely just for the fans.', 2),
('A tad mediocre.', 2),
('Not my cup of tea.', 1),
]
csvfile = sys.stdout
csvwriter = csv.writer(csvfile) # , delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
header = ['review_content', 'review_rating', 'review_date_created', 'review_date_edited', 'review_creator', 'review_book']
csvwriter.writerow(header)
for user in users:
time_now = datetime.datetime.now() - datetime.timedelta(days=60)
for book_title in book_titles:
if random.random()<0.5:
continue
review_content, review_rating = random.choice(review_content_rating)
time_now += datetime.timedelta(days=random.randint(0, 10)) + datetime.timedelta(seconds=int(random.random()*24*3600))
review_date_created = time_now.strftime('%Y-%m-%d %H:%M:%S')
review_date_edited = (time_now + datetime.timedelta(seconds=int(random.random()*24*3600))).strftime('%Y-%m-%d %H:%M:%S')
csvwriter.writerow([review_content, review_rating, review_date_created, review_date_edited, user, book_title])
"""
content:Review,,,,,
review_content,review_rating,review_date_created,review_date_edited,review_creator,review_book
A must read for all,5,2020-01-04 16:31:40.376237,2020-01-04 16:31:40.376237,peterjones@test.com,Advanced Deep Learning with Keras
An ok read,3,2020-01-04 16:31:40.376237,2020-01-04 16:31:40.376237,marksandler@test.com,Advanced Deep Learning with Keras
"""
| 33.114286 | 129 | 0.696721 | ["MIT"] | PacktWorkshops/The-Django-Workshop | Old Code Backup/chapters9,10,11/_newChapter10/Exercise01/bookr/reviews/management/commands/DjangoWorkshopReviewsData.py | 2,318 | Python |
from rate_limiter.Limit import Limit
from rate_limiter.Parser import Parser
from rate_limiter.LimitProcessor import LimitProcessor
from rate_limiter.LogLine import LogLine
from typing import Dict, List
from datetime import datetime
class IpRateLimiter:
    # maps each banned IP to its unban time; also tracks which IPs are currently banned
    ipToUnbanTimeMap: Dict[str, datetime]
logLineList: List[LogLine]
limitProcessorList: List[LimitProcessor]
def __init__(self, logLineList, limitProcessorList) -> None:
self.logLineList = logLineList
self.limitProcessorList = limitProcessorList
self.ipToUnbanTimeMap = {}
def processNewBan(self, newIpToUnbanTimeMap, currTime):
for ip in newIpToUnbanTimeMap:
if ip not in self.ipToUnbanTimeMap:
# new ban. Need to print
print("{0},BAN,{1}".format(int(currTime.timestamp()), ip))
self.ipToUnbanTimeMap[ip] = newIpToUnbanTimeMap[ip]
else:
self.ipToUnbanTimeMap[ip] = max(
self.ipToUnbanTimeMap[ip], newIpToUnbanTimeMap[ip]
)
def needsToBeUnbanned(self, ip: str, currTime: datetime):
toUnban = False
if currTime >= self.ipToUnbanTimeMap[ip]:
toUnban = True
return toUnban
def unbanPassedIPs(self, currTime: datetime):
toUnban = []
for ip in self.ipToUnbanTimeMap:
if self.needsToBeUnbanned(ip, currTime):
toUnban.append(ip)
for ip in toUnban:
print("{0},UNBAN,{1}".format(int(currTime.timestamp()), ip))
# print("{0},UNBAN,{1}".format(self.ipToUnbanTimeMap[ip].timestamp(), ip))
self.ipToUnbanTimeMap.pop(ip)
def run(self):
for line in self.logLineList:
currTime = line.getTime()
# evict expired entries from each processor window
for limitProcessor in self.limitProcessorList:
limitProcessor.evictExpired(currTime)
# check all banned ips if they need to be unbanned
self.unbanPassedIPs(currTime)
# process new request in limit processors
for limitProcessor in self.limitProcessorList:
newBanMap = limitProcessor.processNewRequest(line)
self.processNewBan(newBanMap, currTime)
if self.logLineList and self.ipToUnbanTimeMap:
for ip in self.ipToUnbanTimeMap:
print(
"{0},UNBAN,{1}".format(
int(self.ipToUnbanTimeMap[ip].timestamp()), ip
)
)
class IpRateLimiterBuilder:
    def __init__(self) -> None:
        # use instance attributes; a shared class-level list would leak limits across builder instances
        self.filePath: str = ""
        self.fileParser: Parser = None
        self.limitList: List[Limit] = []
def addFile(self, parser: Parser, filePath: str):
self.filePath = filePath
self.fileParser = parser
return self
def addLimit(self, limit: Limit):
self.limitList.append(limit)
return self
def build(self):
logLineList = self.fileParser.parse(self.filePath)
limitProcessorList: List[LimitProcessor] = []
for limit in self.limitList:
limitProcessorList.append(LimitProcessor(limit))
return IpRateLimiter(logLineList, limitProcessorList)
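# Illustrative usage of the builder above (a sketch only: Parser and Limit construction
# details live in rate_limiter.Parser / rate_limiter.Limit and are not shown in this
# file, so `some_parser` and `some_limit` are placeholders):
#
#   limiter = (
#       IpRateLimiterBuilder()
#       .addFile(parser=some_parser, filePath="access.log")
#       .addLimit(some_limit)
#       .build()
#   )
#   limiter.run()  # prints "<epoch>,BAN,<ip>" and "<epoch>,UNBAN,<ip>" lines while replaying the log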
| 34.829787 | 86 | 0.627673 | ["MIT"] | v-shash/ackerman | rate-limiter/rate_limiter/IPRateLimiter.py | 3,274 | Python |
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
OPTIMIZERS = {
"Adadelta": tf.optimizers.Adadelta,
"Adam": tf.optimizers.Adam,
"Adamax": tf.optimizers.Adamax,
"Ftrl": tf.optimizers.Ftrl,
"Nadam": tf.optimizers.Nadam,
"SGD": tf.optimizers.SGD,
"RMSprop": tf.optimizers.RMSprop,
"Adagrad": tf.optimizers.Adagrad
}
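# The OPTIMIZERS table is consumed by fit_gains_and_foregrounds below via
# OPTIMIZERS[optimizer](**opt_kwargs); for example (illustrative learning rate, not a
# default defined in this module):
#
#   opt = OPTIMIZERS["Adamax"](learning_rate=1e-3)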
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
"""
Order dict keys in order of number of baselines in each group
chunk fit_groups in fg_model_comps_dict into chunks where all groups in the
same chunk have the same number of baselines in each group.
Parameters
----------
fg_model_comps_dict: dict
        dictionary with keys that are tuples of tuples of 2-tuples (that's right, 3 levels)
in the first level, each tuple represents a 'modeling group' visibilities in each
modeling group are represented by a set of basis vectors that span all baselines in that
group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accommodates modeling
visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
use_redundancy: bool, optional
If False, break fitting groups with the same number of baselines in each redundant
sub_group into different fitting groups with no redundancy in each
redundant subgroup. This is to prevent fitting groups with single
        redundant groups of varying lengths from being lumped into different chunks;
        increasing the number of chunks has a more significant impact on run-time
        than increasing the number of baselines in each chunk.
default is False.
    Returns
    -------
fg_model_comps_dict_chunked: dict
dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
of baselines in each vector and the number of vectors. Each 2-tuple points to
a dictionary where each key is the fitting group in fg_comps_dict that includes
nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
numpy.ndarray describing the modeling components for each fitting group in the chunk.
"""
chunked_keys = {}
maxvecs = {}
fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
if not use_redundancy:
# We can remove redundancies for fitting groups of baselines that have the same
# number of elements in each redundant group.
keys_with_redundancy = list(fg_model_comps_dict.keys())
for fit_grp in keys_with_redundancy:
rlens = np.asarray([len(red_grp) for red_grp in fit_grp])
# only break up groups with small numbers of group elements.
if np.allclose(rlens, np.mean(rlens)) and len(rlens) < grp_size_threshold:
# split up groups.
modeling_vectors = fg_model_comps_dict.pop(fit_grp)
for rednum in range(int(rlens[0])):
fit_grp_new = tuple([(red_grp[rednum],) for red_grp in fit_grp])
fg_model_comps_dict[fit_grp_new] = modeling_vectors
for fit_grp in fg_model_comps_dict:
nbl = 0
for red_grp in fit_grp:
for ap in red_grp:
nbl += 1
if nbl in chunked_keys:
chunked_keys[nbl].append(fit_grp)
if fg_model_comps_dict[fit_grp].shape[1] > maxvecs[nbl]:
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
else:
chunked_keys[nbl] = [fit_grp]
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
fg_model_comps_dict_chunked = {}
for nbl in chunked_keys:
fg_model_comps_dict_chunked[(nbl, maxvecs[nbl])] = {k: fg_model_comps_dict[k] for k in chunked_keys[nbl]}
return fg_model_comps_dict_chunked
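# Toy illustration of the key structure handled above (hypothetical antennas and
# shapes, not real data). A fitting group is a tuple of redundant groups, and each
# redundant group is a tuple of (ant1, ant2) pairs:
#
#   fit_grp = (((0, 1), (1, 2)),)           # one redundant group containing two baselines
#   fg_model_comps_dict = {fit_grp: comps}  # comps.shape == (nred_grps * nfreqs, nvecs)
#
# With use_redundancy=True this group stays intact and lands under the chunk key
# (2, nvecs); with the default use_redundancy=False it is first split into two
# single-baseline fitting groups, each landing under (1, nvecs).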
def tensorize_fg_model_comps_dict(
fg_model_comps_dict,
ants_map,
nfreqs,
use_redundancy=False,
dtype=np.float32,
notebook_progressbar=False,
verbose=False,
grp_size_threshold=5,
):
"""Convert per-baseline model components into a Ndata x Ncomponent tensor
Parameters
----------
fg_model_comps_dict: dict
dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
of baselines in each vector and the number of vectors. Each 2-tuple points to
a dictionary where each key is the fitting group in fg_comps_dict that includes
nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
numpy.ndarray describing the modeling components for each fitting group in the chunk.
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
nfreqs: int, optional
number of frequency channels
dtype: numpy.dtype
tensor data types
default is np.float32
Returns
-------
fg_model_comps: list
list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
to be the maximum number of vectors representing any of the ngrps baseline grps
which means that many rows in nvecs will be zero. For example, if we are modeling with
vectors that all span nbls=1 baseline and using delay-modes to model our data
then nvecs will equal the largest number of delay modes necessary to model the wedge
        on all baselines even though the short baselines are described by far fewer modes;
        on short baselines, most of the rows along the vector dimension will therefore be zero.
This is wasteful of memory but it allows us to take advantage of the fast
dense matrix operations on a GPU.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
"""
echo(
f"{datetime.datetime.now()} Computing foreground components matrices...\n",
verbose=verbose,
)
# chunk foreground components.
fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
)
fg_model_comps = []
corr_inds = []
for nbls, nvecs in fg_model_comps_dict:
ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
corr_inds_chunk = []
for grpnum, modeling_grp in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
corr_inds_grp = []
nbl = 0
for rgrpnum, red_grp in enumerate(modeling_grp):
nred = len(red_grp)
for ap in red_grp:
i, j = ants_map[ap[0]], ants_map[ap[1]]
corr_inds_grp.append((i, j))
vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
dslice = slice(nbl * nfreqs, (nbl + 1) * nfreqs)
modeling_matrix[vecslice, grpnum, nbl] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][
compslice
].T
nbl += 1
corr_inds_chunk.append(corr_inds_grp)
fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
corr_inds.append(corr_inds_chunk)
return fg_model_comps, corr_inds
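# Shape sketch for the outputs above (illustrative numbers only): if one chunk holds
# ngrps=3 fitting groups that each span nbls=2 baselines, with nvecs=10 modeling
# vectors and nfreqs=64 channels, then
#
#   fg_model_comps[c].shape == (10, 3, 2, 64)  # (nvecs, ngrps, nbls, nfreqs)
#   len(corr_inds[c]) == 3                     # one list of 2 antenna-index pairs per group
#
# Groups with fewer than nvecs usable vectors are zero-padded along the vector axis,
# as described in the docstring.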
def tensorize_data(
uvdata,
corr_inds,
ants_map,
polarization,
time,
data_scale_factor=1.0,
weights=None,
nsamples_in_weights=False,
dtype=np.float32,
):
"""Convert data in uvdata object to a tensor
Parameters
----------
uvdata: UVData object
UVData object containing data, flags, and nsamples to tensorize.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
polarization: str
pol-str of gain to extract.
time: float
time of data to convert to tensor.
data_scale_factor: float, optional
overall scaling factor to divide tensorized data by.
default is 1.0
weights: UVFlag object, optional
UVFlag weights object containing weights to use for data fitting.
default is None -> use nsamples * ~flags if nsamples_in_weights
or ~flags if not nsamples_in_weights
nsamples_in_weights: bool, optional
If True and weights is None, generate weights proportional to nsamples.
default is False.
dtype: numpy.dtype
data-type to store in tensor.
default is np.float32
Returns
-------
data_r: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the real components of the baselines specified by these 2-tuples.
data_i: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the imag components of the baselines specified by these 2-tuples.
wgts: tf.Tensor object
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the weights of the baselines specified by these 2-tuples.
"""
ants_map_inv = {ants_map[i]: i for i in ants_map}
dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
data_r = np.zeros(dshape, dtype=dtype)
data_i = np.zeros_like(data_r)
wgts = np.zeros_like(data_r)
wgtsum = 0.0
for chunk in corr_inds:
for fitgrp in chunk:
for (i, j) in fitgrp:
ap = ants_map_inv[i], ants_map_inv[j]
bl = ap + (polarization,)
dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
if len(dinds1) > 0:
dinds = dinds1
conjugate = False
pol_ind = pol_ind[0]
else:
dinds = dinds2
conjugate = True
pol_ind = pol_ind[1]
dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
data /= data_scale_factor
if conjugate:
data = np.conj(data)
data_r[i, j] = data.real.astype(dtype)
data_i[i, j] = data.imag.astype(dtype)
if weights is None:
wgts[i, j] = iflags
if nsamples_in_weights:
wgts[i, j] *= nsamples
else:
if ap in weights.get_antpairs():
dinds = weights.antpair2ind(*ap)
else:
dinds = weights.antpair2ind(*ap[::-1])
dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
polnum = np.where(
weights.polarization_array
== uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
)[0][0]
wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
if nsamples_in_weights:
wgts[i, j] *= nsamples
wgtsum += np.sum(wgts[i, j])
data_r = tf.convert_to_tensor(data_r, dtype=dtype)
data_i = tf.convert_to_tensor(data_i, dtype=dtype)
wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
nchunks = len(corr_inds)
data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
return data_r, data_i, wgts
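# Note on the returned chunking (restating the code above, no new behaviour): each of
# data_r[c], data_i[c] and wgts[c] is gathered with corr_inds[c] and therefore has
# shape (ngrps, nbls, nfreqs), matching the trailing axes of fg_model_comps[c], and the
# weights are divided by their total so that they sum to one over all chunks.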
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
"""Remove arbitrary phase and amplitude from deconvolved model and gains.
Parameters
----------
uvdata_reference_model: UVData object
Reference model for "true" visibilities.
uvdata_deconv: UVData object
"Deconvolved" data solved for in self-cal loop.
gains: UVCal object
Gains solved for in self-cal loop.
polarization: str
Polarization string to compute phase and amplitude correction for.
additional_flags: np.ndarray
Any additional flags you wish to use for excluding data from normalization
fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
default is None -> Only exclude data in flags from reference model and deconv from
        determining normalization.
Returns
-------
N/A: Modifies uvdata_deconv and gains in-place.
"""
# compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
polnum_data = np.where(
uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
)[0][0]
bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
selection = (
~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
& ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
)
if additional_flags is not None:
selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
data_ratio = (
uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
/ uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
)
data_ratio[~np.isfinite(data_ratio)] = np.nan
scale_factor_phase = np.angle(np.nanmean(data_ratio))
scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
scale_factor = scale_factor_abs # * np.exp(1j * scale_factor_phase) Need to figure this out later.
uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
polnum_gains = np.where(
gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
)[0][0]
gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
"""Helper function to extract gains into fitting tensors.
Parameters
----------
uvcal: UVCal object
UVCal object holding gain data to tensorize.
polarization: str
pol-str of gain to extract.
time: float
JD of time to convert to tensor.
dtype: numpy.dtype
dtype of tensors to output.
Returns
-------
gains_re: tf.Tensor object.
tensor object holding real component of gains
for time_index and polarization
shape is Nant x Nfreq
gains_im: tf.Tensor object.
tensor object holding imag component of gains
for time_index and polarization
shape is Nant x Nfreq
"""
polnum = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
gains_re = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().real, dtype=dtype)
gains_im = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().imag, dtype=dtype)
return gains_re, gains_im
def yield_fg_model_array(
nants,
nfreqs,
fg_model_comps,
fg_coeffs,
corr_inds,
):
"""Compute tensor foreground model.
Parameters
----------
nants: int
number of antennas in data to model.
    nfreqs: int
number of frequencies in data to model.
fg_model_comps: list
list of fg modeling tf.Tensor objects
representing foreground modeling vectors.
Each tensor is (nvecs, ngrps, nbls, nfreqs)
fg_coeffs: list
list of fg modeling tf.Tensor objects
representing foreground modeling coefficients.
Each tensor is (nvecs, ngrps, 1, 1)
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
Returns
-------
model: tf.Tensor object
nants x nants x nfreqs model of the visibility data
"""
model = np.zeros((nants, nants, nfreqs))
nchunks = len(fg_model_comps)
for cnum in range(nchunks):
ngrps = fg_model_comps[cnum].shape[1]
gchunk = tf.reduce_sum(fg_coeffs[cnum] * fg_model_comps[cnum], axis=0).numpy()
for gnum in range(ngrps):
for blnum, (i, j) in enumerate(corr_inds[cnum][gnum]):
model[i, j] = gchunk[gnum, blnum]
return model
def fit_gains_and_foregrounds(
g_r,
g_i,
fg_r,
fg_i,
data_r,
data_i,
wgts,
fg_comps,
corr_inds,
use_min=False,
tol=1e-14,
maxsteps=10000,
optimizer="Adamax",
freeze_model=False,
verbose=False,
notebook_progressbar=False,
dtype=np.float32,
graph_mode=False,
n_profile_steps=0,
profile_log_dir="./logdir",
sky_model_r=None,
sky_model_i=None,
model_regularization=None,
graph_args_dict=None,
**opt_kwargs,
):
"""Run optimization loop to fit gains and foreground components.
Parameters
----------
g_r: tf.Tensor object.
tf.Tensor object holding real parts of gains.
g_i: tf.Tensor object.
tf.Tensor object holding imag parts of gains.
fg_r: list
list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
tf.Tensor object holding foreground coeffs.
fg_i: list
list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
tf.Tensor object holding imag coeffs.
data_r: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
real part of data to fit.
data_i: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
imag part of data to fit.
wgts: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
fg_comps: list:
list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
represents vectors to be used in modeling visibilities.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
use_min: bool, optional
if True, use the value that minimizes the loss function
regardless of where optimization loop ended up
(prevents overshooting due to excess momentum)
tol: float, optional
        halt the optimization loop once the loss changes by less than this value.
default is 1e-14
maxsteps: int, optional
maximum number of opt.minimize calls before halting.
default is 10000
optimizer: string
Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
default is 'Adamax'
freeze_model: bool, optional
Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
with sky_model as the model (but projected onto the foreground basis vectors).
default is False.
verbose: bool, optional
lots of text output
default is False.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
graph_mode: bool, optional
if True, compile gradient update step in graph mode to speed up
runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
it actually increases runtime by a similar factor.
n_profile_steps: bool, optional
number of steps to run profiling on
default is 0.
profile_log_dir: str, optional
directory to save profile logs to
default is './logdir'
sky_model_r: list of tf.Tensor objects, optional
chunked tensors containing model in same format as data_r
sky_model_i: list of tf.Tensor objects, optional
chunked tensors containing model in the same format as data_i
model_regularization: str, optional
type of model regularization to perform. Currently support "sum"
where the sums of real and imaginary parts (across all bls and freqs)
are constrained to be the same as the sum of real and imag parts
of data.
opt_kwargs: kwarg dict
additional kwargs for tf.opt.Optimizer(). See tensorflow docs.
Returns
-------
g_r_opt: tf.Tensor object
real part of optimized gains.
g_i_opt: tf.Tensor object
imag part of optimized gains.
fg_r_opt: tf.Tensor object
real part of foreground coeffs.
fg_i_opt: tf.Tensor object.
imag part of optimized foreground coeffs.
fit_history: dict
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
if graph_args_dict is None:
graph_args_dict = {}
# initialize the optimizer.
echo(f"Using {str(dtype)} precision.")
echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
for k in opt_kwargs:
echo(f"{k}: {opt_kwargs[k]}")
opt = OPTIMIZERS[optimizer](**opt_kwargs)
# set up history recording
fit_history = {"loss": []}
min_loss = 9e99
nants = g_r.shape[0]
nfreqs = g_r.shape[1]
ant0_inds = []
ant1_inds = []
nchunks = len(fg_comps)
# build up list of lists of ant0 and ant1 for gather ops
for cnum in range(nchunks):
ant0_chunk = []
ant1_chunk = []
ngrps = len(corr_inds[cnum])
for gnum in range(ngrps):
ant0_grp = []
ant1_grp = []
for cpair in corr_inds[cnum][gnum]:
ant0_grp.append(cpair[0])
ant1_grp.append(cpair[1])
ant0_chunk.append(ant0_grp)
ant1_chunk.append(ant1_grp)
ant0_inds.append(ant0_chunk)
ant1_inds.append(ant1_chunk)
g_r = tf.Variable(g_r)
g_i = tf.Variable(g_i)
if not freeze_model:
fg_r = [tf.Variable(fgr) for fgr in fg_r]
fg_i = [tf.Variable(fgi) for fgi in fg_i]
vars = [g_r, g_i] + fg_r + fg_i
else:
vars = [g_r, g_i]
echo(
f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
verbose=verbose,
)
if not freeze_model:
echo(
f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
verbose=verbose,
)
echo(
f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
verbose=verbose,
)
if model_regularization == "sum":
prior_r_sum = tf.reduce_sum(
tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
)
prior_i_sum = tf.reduce_sum(
tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
)
def loss_function():
return mse_chunked_sum_regularized(
g_r=g_r,
g_i=g_i,
fg_r=fg_r,
fg_i=fg_i,
fg_comps=fg_comps,
nchunks=nchunks,
data_r=data_r,
data_i=data_i,
wgts=wgts,
ant0_inds=ant0_inds,
ant1_inds=ant1_inds,
dtype=dtype,
prior_r_sum=prior_r_sum,
prior_i_sum=prior_i_sum,
)
else:
def loss_function():
return mse_chunked(
g_r=g_r,
g_i=g_i,
fg_r=fg_r,
fg_i=fg_i,
fg_comps=fg_comps,
nchunks=nchunks,
data_r=data_r,
data_i=data_i,
wgts=wgts,
ant0_inds=ant0_inds,
ant1_inds=ant1_inds,
dtype=dtype,
)
def train_step_code():
with tf.GradientTape() as tape:
loss = loss_function()
grads = tape.gradient(loss, vars)
opt.apply_gradients(zip(grads, vars))
return loss
if graph_mode:
@tf.function(**graph_args_dict)
def train_step():
return train_step_code()
else:
def train_step():
return train_step_code()
if n_profile_steps > 0:
echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
tf.profiler.experimental.start(profile_log_dir)
for step in PBARS[notebook_progressbar](range(n_profile_steps)):
with tf.profiler.experimental.Trace("train", step_num=step):
train_step()
tf.profiler.experimental.stop()
echo(
f"{datetime.datetime.now()} Building Computational Graph...\n",
verbose=verbose,
)
loss = train_step()
echo(
f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
verbose=verbose,
)
for step in PBARS[notebook_progressbar](range(maxsteps)):
loss = train_step()
fit_history["loss"].append(loss.numpy())
if use_min and fit_history["loss"][-1] < min_loss:
# store the g_r, g_i, fg_r, fg_i values that minimize loss
# in case of overshoot.
min_loss = fit_history["loss"][-1]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if not freeze_model:
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
echo(
f"Tolerance thresshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
verbose=verbose,
)
break
    # if we don't use use_min, then the last
# visited set of parameters will be used
# to set the ML params.
if not use_min:
min_loss = fit_history["loss"][-1]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if not freeze_model:
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
else:
fg_r_opt = fg_r
fg_i_opt = fg_i
echo(
f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
verbose=verbose,
)
return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
uvdata,
time,
polarization,
ants_map,
red_grps,
model_r,
model_i,
scale_factor=1.0,
):
"""Insert fitted tensor values back into uvdata object for tensor mode.
Parameters
----------
uvdata: UVData object
uvdata object to insert model data into.
time: float
JD of time to insert.
polarization: str
polarization to insert.
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
red_grps: list of lists of int 2-tuples
a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with each other. Assumes that conjugates are correctly taken.
model_r: np.ndarray
an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
model_i: np.ndarray
an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
scale_factor: float, optional
overall scaling factor to divide tensorized data by.
default is 1.0
Returns
-------
N/A: Modifies uvdata inplace.
"""
antpairs_data = uvdata.get_antpairs()
polnum = np.where(
uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
)[0][0]
for red_grp in red_grps:
for ap in red_grp:
i, j = ants_map[ap[0]], ants_map[ap[1]]
if ap in antpairs_data:
dinds = uvdata.antpair2ind(ap)
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
model = model_r[i, j] + 1j * model_i[i, j]
else:
dinds = uvdata.antpair2ind(ap[::-1])
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
model = model_r[i, j] - 1j * model_i[i, j]
uvdata.data_array[dinds, 0, :, polnum] = model * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
"""Insert tensorized gains back into uvcal object
Parameters
----------
uvdata: UVData object
uvdata object to insert model data into.
time: float
JD of time to insert.
polarization: str
polarization to insert.
gains_re: dict with int keys and tf.Tensor object values
dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object
representing the real component of the complex gain for antenna i.
gains_im: dict with int keys and tf.Tensor object values
dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object
representing the imag component of the complex gain for antenna j.
Returns
-------
N/A: Modifies uvcal inplace.
"""
polnum = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
for ant_index in range(uvcal.Nants_data):
uvcal.gain_array[ant_index, 0, :, gindt, polnum] = (
gains_re[ant_index].numpy() + 1j * gains_im[ant_index].numpy()
)
def tensorize_fg_coeffs(
data,
wgts,
fg_model_comps,
notebook_progressbar=False,
verbose=False,
):
"""Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.
Parameters
----------
data: list
list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
representing data
wgts: list
list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
representing weights.
fg_model_comps: list
list of fg modeling tf.Tensor objects
representing foreground modeling vectors.
Each tensor is (nvecs, ngrps, nbls, nfreqs)
see description in tensorize_fg_model_comps_dict
docstring.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
verbose: bool, optional
lots of text output
default is False.
Returns
-------
fg_coeffs_re: tf.Tensor object
1d tensor containing real parts of coeffs for each modeling vector.
ordering is over foreground modeling vector per redundant group and then
redundant group in the order of groups appearing in red_grps
fg_coeffs_im: tf.Tensor object
1d tensor containing imag parts of coeffs for each modeling vector.
ordering is over foreground modeling vector per redundant group and then
redundant group in the order of groups appearing in red_grps
"""
echo(
f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
verbose=verbose,
)
fg_coeffs = []
nchunks = len(data)
binary_wgts = [
tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
]
for cnum in PBARS[notebook_progressbar](range(nchunks)):
# set up linear leastsq
fg_coeff_chunk = []
ngrps = data[cnum].shape[0]
ndata = data[cnum].shape[1] * data[cnum].shape[2]
nvecs = fg_model_comps[cnum].shape[0]
# pad with zeros
for gnum in range(ngrps):
nonzero_rows = np.where(
np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
)[0]
if len(nonzero_rows) > 0:
nvecs_nonzero = np.min(nonzero_rows)
else:
nvecs_nonzero = nvecs
# solve linear leastsq
fg_coeff_chunk.append(
tf.reshape(
tf.linalg.lstsq(
tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero],
tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)),
),
(nvecs_nonzero,),
)
)
# pad zeros at the end back up to nvecs.
fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)])
        # add two additional dummy indices to satisfy broadcasting rules.
fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
fg_coeffs.append(fg_coeff_chunk)
echo(
f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
verbose=verbose,
)
return fg_coeffs
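# The initial guess above is a plain linear least-squares fit done group by group
# (restating the code, not adding behaviour): with A the (ndata x nvecs_nonzero)
# matrix of modeling vectors and d the data masked to nonzero-weight samples and
# flattened over baselines and channels, tf.linalg.lstsq solves min_c ||A c - d||^2,
# and the solution is zero-padded back up to nvecs before stacking across groups.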
def get_auto_weights(uvdata, delay_extent=25.0):
"""
inverse variance weights from interpolated autocorrelation data
Parameters
----------
uvdata: UVData object
UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        fit autocorrelations to delay components with this width.
        default is 25.0
Returns
-------
data_weights: UVFlag object
        UVFlag object in flag-mode where flags contain the original data flags and weights contain the autocorrelation-based weights.
"""
dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
data_weights = UVFlag(uvdata, mode="flag")
data_weights.weights_array = np.zeros(uvdata.data_array.shape)
# compute autocorrelation weights
auto_fit_dict = {}
bls = uvdata.get_antpairpols()
for bl in bls:
if bl[0] == bl[1]:
d_wf = uvdata.get_data(bl)
w_wf = ~uvdata.get_flags(bl)
auto_fit_dict[bl] = []
for ds, fs in zip(d_wf, w_wf):
# fit autocorr waterfall to DPSS modes.
nunflagged = np.count_nonzero(fs)
amat = tf.convert_to_tensor(dpss_components[fs])
dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
auto_fit_dict[bl].append(model)
auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # compute inverse-variance weights from the autocorrelation fits
for bl in bls:
smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
smooth_weights *= ~uvdata.get_flags(bl)
dinds = data_weights.antpair2ind(*bl[:2])
polnum = np.where(
data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
)[0][0]
data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
return data_weights
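# Usage sketch (hypothetical variable names; this mirrors how the `weights` argument
# of calibrate_and_model_tensor is documented below):
#
#   auto_wgts = get_auto_weights(uvdata, delay_extent=25.0)
#   model, resid, gains, info = calibrate_and_model_tensor(
#       uvdata, fg_model_comps_dict, weights=auto_wgts
#   )
#
# i.e. the returned UVFlag object is passed in place of the default
# nsamples * ~flags weighting.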
def calibrate_and_model_tensor(
uvdata,
fg_model_comps_dict,
gains=None,
freeze_model=False,
optimizer="Adamax",
tol=1e-14,
maxsteps=10000,
include_autos=False,
verbose=False,
sky_model=None,
dtype=np.float32,
use_min=False,
use_redundancy=False,
notebook_progressbar=False,
correct_resid=False,
correct_model=True,
weights=None,
nsamples_in_weights=True,
graph_mode=False,
grp_size_threshold=5,
n_profile_steps=0,
profile_log_dir="./logdir",
model_regularization="sum",
init_guesses_from_previous_time_step=False,
skip_threshold=0.5,
use_model_snr_weights=False,
**opt_kwargs,
):
"""Perform simultaneous calibration and foreground fitting using tensors.
Parameters
----------
uvdata: UVData object
        uvdata object of data to be calibrated.
fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (that's right, 3 levels)
in the first level, each tuple represents a 'modeling group' visibilities in each
modeling group are represented by a set of basis vectors that span all baselines in that
group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accommodates modeling
visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
gains: UVCal object
UVCal with initial gain estimates.
        There are many smart ways to obtain initial gain estimates
but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
Users can determine initial gains with their favorite established cal algorithm.
default is None -> start with unity gains.
        WARNING: At present, the flags in gains are not propagated/used! Make sure the flags are set in the uvdata object!
freeze_model: bool, optional
Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
with sky_model as the model (but projected onto the foreground basis vectors).
default is False.
optimizer: string
Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
default is 'Adamax'
tol: float, optional
halting condition for optimizer loop. Stop loop when the change in the cost function falls
below tol.
default is 1e-14
maxsteps: int, optional
maximum number of opt.minimize calls before halting.
default is 10000
include_autos: bool, optional
include autocorrelations in fitting.
default is False.
verbose: bool, optional
generate lots of text.
default is False.
sky_model: UVData object, optional
a sky-model to use for initial estimates of foreground coeffs and
to set overall flux scale and phases.
Note that this model is not used to obtain initial gain estimates.
These must be provided through the gains argument.
dtype: numpy dtype, optional
the float precision to be used in tensorflow gradient descent.
runtime scales roughly inversely linear with precision.
default is np.float32
use_min: bool, optional
If True, use the set of parameters that determine minimum as the ML params
If False, use the last set of parameters visited by the optimization loop.
use_redundancy: bool, optional
if true, solve for one set of foreground coeffs per redundant baseline group
instead of per baseline.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
correct_resid: bool, optional
if True, gain correct residual.
default is False
correct_model: bool, optional
if True, gain correct model.
        default is True
weights: UVFlag object, optional.
UVFlag weights object containing weights to use for data fitting.
default is None -> use nsamples * ~flags if nsamples_in_weights
or ~flags if not nsamples_in_weights
nsamples_in_weights: bool, optional
If True and weights is None, generate weights proportional to nsamples.
default is True.
graph_mode: bool, optional
if True, compile gradient update step in graph mode to speed up
runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
it actually increases runtime by a similar factor.
n_profile_steps: bool, optional
number of steps to run profiling on
default is 0.
profile_log_dir: str, optional
directory to save profile logs to
default is './logdir'
model_regularization: str, optional
option to regularize model
        supported options are 'post_hoc' (renormalize amplitude/phase against the sky model after fitting)
        and 'sum' (constrain the weighted sums of the real and imaginary model parts to match the data).
        default is 'sum'.
init_guesses_from_previous_time_step: bool, optional
if True, then use foreground coeffs and gains from previous time-step to
initialize gains for next time step.
skip_threshold: float, optional
        if less than this fraction of the data is unflagged for a particular time and polarization,
        flag that entire time/polarization.
opt_kwargs: kwarg_dict
kwargs for tf.optimizers
Returns
-------
model: UVData object
uvdata object containing model of the foregrounds
resid: UVData object
uvdata object containing resids which are the data minus
the model with gains multiplied and then with the gains divided out.
gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and are likely orders of magnitude off from an absolute calibration.
fit_history:
dictionary containing fit history with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
antpairs_data = uvdata.get_antpairs()
if not include_autos:
antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
resid = copy.deepcopy(uvdata)
model = copy.deepcopy(uvdata)
model.data_array[:] = 0.0
model.flag_array[:] = False
# get redundant groups
red_grps = []
for fit_grp in fg_model_comps_dict.keys():
for red_grp in fit_grp:
red_grps.append(red_grp)
if gains is None:
echo(
f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
verbose=verbose,
)
gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
if sky_model is None and model_regularization is not None:
echo(
f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
verbose=verbose,
)
sky_model = cal_utils.apply_gains(uvdata, gains)
else:
sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
fit_history = {}
ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
# generate tensors to hold foreground components.
fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
fg_model_comps_dict=fg_model_comps_dict,
ants_map=ants_map,
dtype=dtype,
nfreqs=sky_model.Nfreqs,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
use_redundancy=use_redundancy,
grp_size_threshold=grp_size_threshold,
)
echo(
f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
verbose=verbose,
)
# delete fg_model_comps_dict. It can take up a lot of memory.
del fg_model_comps_dict
# loop through polarization and times.
for polnum, pol in enumerate(uvdata.get_pols()):
echo(
f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
verbose=verbose,
)
fit_history_p = {}
first_time = True
for time_index, time in enumerate(np.unique(uvdata.time_array)):
echo(
f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
verbose=verbose,
)
bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
uvdata.Nbls * uvdata.Nfreqs
)
# check that fraction of unflagged data > skip_threshold.
if frac_unflagged >= skip_threshold:
rmsdata = np.sqrt(
np.mean(
np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
)
)
echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
data_r, data_i, wgts = tensorize_data(
uvdata,
corr_inds=corr_inds,
ants_map=ants_map,
polarization=pol,
time=time,
data_scale_factor=rmsdata,
weights=weights,
nsamples_in_weights=nsamples_in_weights,
dtype=dtype,
)
if sky_model is not None:
echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
sky_model_r, sky_model_i, _ = tensorize_data(
sky_model,
corr_inds=corr_inds,
ants_map=ants_map,
polarization=pol,
time=time,
data_scale_factor=rmsdata,
weights=weights,
dtype=dtype,
)
else:
sky_model_r, sky_model_i = None, None
if first_time or not init_guesses_from_previous_time_step:
first_time = False
echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
# generate initial guess for foreground coeffs.
echo(
f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
verbose=verbose,
)
fg_r = tensorize_fg_coeffs(
data=data_r,
wgts=wgts,
fg_model_comps=fg_model_comps,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
)
fg_i = tensorize_fg_coeffs(
data=data_i,
wgts=wgts,
fg_model_comps=fg_model_comps,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
)
if use_model_snr_weights:
wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
del wgts_model
# renormalize
wgts_sum = np.sum([np.sum(w) for w in wgts])
wgts = [w / wgts_sum for w in wgts]
(g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
g_r=g_r,
g_i=g_i,
fg_r=fg_r,
fg_i=fg_i,
data_r=data_r,
data_i=data_i,
wgts=wgts,
fg_comps=fg_model_comps,
corr_inds=corr_inds,
optimizer=optimizer,
use_min=use_min,
freeze_model=freeze_model,
notebook_progressbar=notebook_progressbar,
verbose=verbose,
tol=tol,
dtype=dtype,
maxsteps=maxsteps,
graph_mode=graph_mode,
n_profile_steps=n_profile_steps,
profile_log_dir=profile_log_dir,
sky_model_r=sky_model_r,
sky_model_i=sky_model_i,
model_regularization=model_regularization,
**opt_kwargs,
)
# insert into model uvdata.
insert_model_into_uvdata_tensor(
uvdata=model,
time=time,
polarization=pol,
ants_map=ants_map,
red_grps=red_grps,
model_r=yield_fg_model_array(
fg_model_comps=fg_model_comps,
fg_coeffs=fg_r,
corr_inds=corr_inds,
nants=uvdata.Nants_data,
nfreqs=uvdata.Nfreqs,
),
model_i=yield_fg_model_array(
fg_model_comps=fg_model_comps,
fg_coeffs=fg_i,
corr_inds=corr_inds,
nants=uvdata.Nants_data,
nfreqs=uvdata.Nfreqs,
),
scale_factor=rmsdata,
)
# insert gains into uvcal
insert_gains_into_uvcal(
uvcal=gains,
time=time,
polarization=pol,
gains_re=g_r,
gains_im=g_i,
)
else:
echo(
f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
verbose=verbose,
)
flag_poltime(resid, time=time, polarization=pol)
flag_poltime(gains, time=time, polarization=pol)
flag_poltime(model, time=time, polarization=pol)
                fit_history_p[time_index] = "skipped!"
# normalize on sky model if we use post-hoc regularization
if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
renormalize(
uvdata_reference_model=sky_model,
uvdata_deconv=model,
gains=gains,
polarization=pol,
time=time,
additional_flags=uvdata.flag_array,
)
fit_history[polnum] = fit_history_p
model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
if not correct_model:
model = model_with_gains
resid.data_array -= model_with_gains.data_array
resid.data_array[model_with_gains.flag_array] = 0.0 # set resid to zero where model is flagged.
resid.data_array[uvdata.flag_array] = 0.0 # also set resid to zero where data is flagged.
if correct_resid:
resid = cal_utils.apply_gains(resid, gains)
return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
if isinstance(data_object, UVData):
bltsel = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
polnum = np.where(
data_object.polarization_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
)[0][0]
data_object.flag_array[bltsel, :, :, polnum] = True
data_object.data_array[bltsel, :, :, polnum] = 0.0
elif isinstance(data_object, UVCal):
polnum = np.where(
data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
)[0][0]
gindt = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
data_object.gain_array[:, 0, :, gindt, polnum] = 1.0
data_object.flag_array[:, 0, :, gindt, polnum] = True
else:
raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
uvdata,
horizon=1.0,
min_dly=0.0,
offset=0.0,
ant_dly=0.0,
include_autos=False,
verbose=False,
red_tol=1.0,
red_tol_freq=0.5,
n_angle_bins=200,
notebook_progressbar=False,
use_redundancy=False,
use_tensorflow_to_derive_modeling_comps=False,
eigenval_cutoff=1e-10,
dtype_matinv=np.float64,
require_exact_angle_match=True,
angle_match_tol=1e-3,
grp_size_threshold=5,
model_comps_dict=None,
save_dict_to=None,
**fitting_kwargs,
):
"""Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
for baselines with no frequency redundancy and simple_cov components for
groups of baselines that have some frequency redundancy.
Parameters
----------
uvdata: UVData object.
dataset to calibrate and filter.
horizon: float, optional
fraction of baseline delay length to model with dpss modes
unitless.
default is 1.
min_dly: float, optional
minimum delay to model with dpss models.
in units of ns.
default is 0.
offset: float optional
offset off of horizon wedge to include in dpss delay range.
in units of ns.
default is 0.
ant_dly: float, optional
intrinsic chromaticity of each antenna element
in units of ns.
default is 0.
include_autos: bool, optional
if true, include autocorrelations in fitting.
default is False.
verbose: bool, optional
lots of text output
default is False.
red_tol: float, optional
tolerance for treating baselines as redundant (meters)
default is 1.0
red_tol_freq: float, optional
tolerance for treating two baselines as having some
frequency redundancy. When frequency redundancy exists, baselines
will be modeled jointly.
n_angle_bins: int, optional
number of angular bins to use between -pi and pi to compare baselines
default is 200
notebook_progressbar: bool, optional
if True, show graphical notebook progress bar that looks good in jupyter.
default is False.
use_redundancy: bool, optional
If True, model all baselines within each redundant group with the same components
        If False, model each baseline within each redundant group with separate components.
default is False.
use_tensorflow_to_derive_modeling_comps: bool, optional
Use tensorflow methods to derive multi-baseline modeling components.
recommended if you have a GPU with enough memory to perform spectral decomposition
of multi-baseline covariance matrices.
eigenval_cutoff: float, optional
threshold of eigenvectors to include in modeling components.
dtype_matinv: numpy.dtype, optional
data type to use for deriving modeling components.
default is np.float64 (need higher precision for cov-mat like calculation)
grp_size_threshold: int, optional
        groups with number of elements less than this value are split up into single baselines.
default is 5.
model_comps_dict: dict, optional
dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
for more specifics.
default is None -> compute fitting groups automatically.
save_dict_to: str, optional
        if provided, save model_comps_dict to this path with numpy.save.
        default is None -> do not save.
fitting_kwargs: kwarg dict
additional kwargs for calibrate_and_model_tensor.
see docstring of calibrate_and_model_tensor.
Returns
-------
model: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains: UVCal object
uvcal object containing fitted gains.
fit_history:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
# get fitting groups
fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
uvdata,
red_tol=red_tol,
include_autos=include_autos,
red_tol_freq=red_tol_freq,
n_angle_bins=n_angle_bins,
notebook_progressbar=notebook_progressbar,
require_exact_angle_match=require_exact_angle_match,
angle_match_tol=angle_match_tol,
)
if model_comps_dict is None:
model_comps_dict = modeling.yield_mixed_comps(
fitting_grps,
blvecs,
uvdata.freq_array[0],
eigenval_cutoff=eigenval_cutoff,
use_tensorflow=use_tensorflow_to_derive_modeling_comps,
ant_dly=ant_dly,
horizon=horizon,
offset=offset,
min_dly=min_dly,
verbose=verbose,
dtype=dtype_matinv,
notebook_progressbar=notebook_progressbar,
grp_size_threshold=grp_size_threshold,
)
if save_dict_to is not None:
np.save(save_dict_to, model_comps_dict)
(model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
uvdata=uvdata,
fg_model_comps_dict=model_comps_dict,
include_autos=include_autos,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
use_redundancy=use_redundancy,
**fitting_kwargs,
)
return model, resid, gains, fitted_info
def calibrate_and_model_dpss(
uvdata,
horizon=1.0,
min_dly=0.0,
offset=0.0,
include_autos=False,
verbose=False,
red_tol=1.0,
notebook_progressbar=False,
fg_model_comps_dict=None,
**fitting_kwargs,
):
"""Simultaneously solve for gains and model foregrounds with DPSS vectors.
Parameters
----------
uvdata: UVData object.
dataset to calibrate and filter.
horizon: float, optional
fraction of baseline delay length to model with dpss modes
unitless.
default is 1.
min_dly: float, optional
minimum delay to model with dpss models.
in units of ns.
default is 0.
offset: float optional
offset off of horizon wedge to include in dpss delay range.
in units of ns.
default is 0.
include_autos: bool, optional
if true, include autocorrelations in fitting.
default is False.
verbose: bool, optional
lots of text output
default is False.
red_tol: float, optional
tolerance for treating baselines as redundant (meters)
default is 1.0
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
fg_model_comps_dict: dict, optional
dictionary containing precomputed foreground model components.
Currently only supported if use_redundancy is False.
fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.
Returns
-------
model: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains: UVCal object
uvcal object containing fitted gains.
fit_history:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
dpss_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
uvdata,
horizon=horizon,
min_dly=min_dly,
offset=offset,
include_autos=include_autos,
red_tol=red_tol,
notebook_progressbar=notebook_progressbar,
verbose=verbose,
)
(model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
uvdata=uvdata,
fg_model_comps_dict=dpss_model_comps_dict,
include_autos=include_autos,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
**fitting_kwargs,
)
return model, resid, gains, fitted_info
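# Minimal end-to-end sketch (the file name and keyword values are hypothetical; only
# functions and keywords defined in this module and pyuvdata's UVData.read are assumed):
#
#   uvd = UVData()
#   uvd.read("data.uvh5")
#   model, resid, gains, info = calibrate_and_model_dpss(uvd, min_dly=100.0, verbose=True)
#
# model and resid are UVData objects and gains is a UVCal object, as described in the
# docstring above.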
def fg_model(fg_r, fg_i, fg_comps):
vr = tf.reduce_sum(fg_r * fg_comps, axis=0)
vi = tf.reduce_sum(fg_i * fg_comps, axis=0)
return vr, vi
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
gr0 = tf.gather(g_r, ant0_inds)
gr1 = tf.gather(g_r, ant1_inds)
gi0 = tf.gather(g_i, ant0_inds)
gi1 = tf.gather(g_i, ant1_inds)
grgr = gr0 * gr1
gigi = gi0 * gi1
grgi = gr0 * gi1
gigr = gi0 * gr1
vr, vi = fg_model(fg_r, fg_i, fg_comps)
model_r = (grgr + gigi) * vr + (grgi - gigr) * vi
model_i = (gigr - grgi) * vr + (grgr + gigi) * vi
return model_r, model_i
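# Algebra check for data_model above: writing g0 = gr0 + 1j*gi0, g1 = gr1 + 1j*gi1 and
# v = vr + 1j*vi, the modeled visibility is g0 * conj(g1) * v, whose real and imaginary
# parts expand to exactly the expressions returned above:
#   Re = (gr0*gr1 + gi0*gi1) * vr + (gr0*gi1 - gi0*gr1) * vi
#   Im = (gi0*gr1 - gr0*gi1) * vr + (gr0*gr1 + gi0*gi1) * vi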
def mse(model_r, model_i, data_r, data_i, wgts):
return tf.reduce_sum((tf.square(data_r - model_r) + tf.square(data_i - model_i)) * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
cal_loss = [tf.constant(0.0, dtype) for cnum in range(nchunks)]
# now deal with dense components
for cnum in range(nchunks):
model_r, model_i = data_model(
g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
)
cal_loss[cnum] += mse(model_r, model_i, data_r[cnum], data_i[cnum], wgts[cnum])
return tf.reduce_sum(tf.stack(cal_loss))
def mse_chunked_sum_regularized(
g_r,
g_i,
fg_r,
fg_i,
fg_comps,
nchunks,
data_r,
data_i,
wgts,
ant0_inds,
ant1_inds,
prior_r_sum,
prior_i_sum,
dtype=np.float32,
):
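    """Chunked weighted MSE plus a penalty tying the weighted model sums to prior sums.

    In addition to the per-chunk weighted MSE, adds the squared differences between
    the weighted sums of the model real/imaginary parts and prior_r_sum / prior_i_sum.
    """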
cal_loss = [tf.constant(0.0, dtype) for cnum in range(nchunks)]
model_i_sum = [tf.constant(0.0, dtype) for cnum in range(nchunks)]
model_r_sum = [tf.constant(0.0, dtype) for cnum in range(nchunks)]
# now deal with dense components
for cnum in range(nchunks):
model_r, model_i = data_model(
g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
)
# compute sum of real and imag parts x weights for regularization.
model_r_sum[cnum] += tf.reduce_sum(model_r * wgts[cnum])
model_i_sum[cnum] += tf.reduce_sum(model_i * wgts[cnum])
cal_loss[cnum] += mse(model_r, model_i, data_r[cnum], data_i[cnum], wgts[cnum])
return (
tf.reduce_sum(tf.stack(cal_loss))
+ tf.square(tf.reduce_sum(tf.stack(model_r_sum)) - prior_r_sum)
+ tf.square(tf.reduce_sum(tf.stack(model_i_sum)) - prior_i_sum)
)
def read_calibrate_and_model_dpss(
input_data_files,
input_model_files=None,
input_gain_files=None,
resid_outfilename=None,
gain_outfilename=None,
model_outfilename=None,
fitted_info_outfilename=None,
x_orientation="east",
clobber=False,
bllen_min=0.0,
bllen_max=np.inf,
bl_ew_min=0.0,
ex_ants=None,
select_ants=None,
gpu_index=None,
gpu_memory_limit=None,
precision=32,
use_autocorrs_in_weights=False,
**calibration_kwargs,
):
"""
Driver function for using calamity with DPSS modeling.
Parameters
----------
input_data_files: list of strings or UVData object.
list of paths to input files to read in and calibrate.
input_model_files: list of strings or UVData object, optional
        list of paths to model files for overall phase/amplitude reference.
Default is None -> use input files as model for overall
phase and amplitude calibration.
input_gain_files: list of strings or UVCal object, optional
list of paths to gain files to use as initial guesses for calibration.
resid_outfilename: str, optional
path for file to write residuals.
default is None -> don't write out residuals.
gain_outfilename: str, optional
path to gain calfits to write fitted gains.
default is None -> don't write out gains.
    model_outfilename: str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename: str, optional
        path to pickle fitting info to.
    n_output_chunks: int, optional
split up outputs into n_output_chunks chunked by time.
default is None -> write single output file.
bllen_min: float, optional
        select all baselines with length greater than this value [meters].
default is 0.0
bllen_max: float, optional
        select only baselines with length less than this value [meters].
default is np.inf.
bl_ew_min: float, optional
        select all baselines with EW projected length greater than this value [meters].
default is 0.0
gpu_index: int, optional
limit visible GPUs to be the index of this GPU.
default: None -> all GPUs are visible.
gpu_memory_limit: float, optional
GiB of memory on GPU that can be used.
default None -> all memory available.
use_autocorrs_in_weights: bool, optional
if True, use smooth fits to autocorrelations as
inverse variance weights.
default is False.
calibration_kwargs: kwarg dict
        see kwargs for calibrate_and_model_dpss()
Returns
-------
model_fit: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid_fit: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains_fit: UVCal object
uvcal object containing fitted gains.
fit_info:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
gpus = tf.config.list_physical_devices("GPU")
if gpu_index is not None:
# See https://www.tensorflow.org/guide/gpu
if gpus:
if gpu_memory_limit is None:
tf.config.set_visible_devices(gpus[gpu_index], "GPU")
else:
tf.config.set_logical_device_configuration(
gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
if isinstance(input_data_files, str):
input_data_files = [input_data_files]
if isinstance(input_data_files, list):
uvd = UVData()
uvd.read(input_data_files)
else:
uvd = input_data_files
if use_autocorrs_in_weights:
weights = get_auto_weights(uvd)
else:
weights = None
utils.select_baselines(
uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
)
if isinstance(input_model_files, str):
input_model_files = [input_model_files]
if input_model_files is not None:
if isinstance(input_model_files, list):
uvd_model = UVData()
uvd_model.read(input_model_files)
else:
uvd_model = input_model_files
else:
uvd_model = None
if uvd_model is not None:
utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
if isinstance(input_gain_files, str):
input_gain_files = [input_gain_files]
if input_gain_files is not None:
if isinstance(input_gain_files, list):
uvc = UVCal()
uvc.read_calfits(input_gain_files)
else:
uvc = input_gain_files
else:
uvc = None
# run calibration with specified GPU device.
dtype = {32: np.float32, 64: np.float64}[precision]
if gpu_index is not None and gpus:
with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
)
else:
model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
)
if resid_outfilename is not None:
resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
if gain_outfilename is not None:
gains_fit.x_orientation = x_orientation
gains_fit.write_calfits(gain_outfilename, clobber=clobber)
if model_outfilename is not None:
model_fit.write_uvh5(model_outfilename, clobber=clobber)
# don't write fitting_info_outfilename for now.
fit_info["calibration_kwargs"] = calibration_kwargs
fit_info["calibration_kwargs"]["dtype"] = dtype
# don't write fitting_info_outfilename for now.
return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
ap = argparse.ArgumentParser()
sp = ap.add_argument_group("Input and Output Arguments.")
sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
sp.add_argument(
"--input_model_files", type=str, nargs="+", help="paths to model files to set overal amplitude and phase."
)
sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a staring point.")
sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
sp.add_argument("--clobber", action="store_true", default="False", help="Overwrite existing outputs.")
sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
sp.add_argument(
"--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
)
sp.add_argument(
"--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calbration and outputs."
)
sp.add_argument(
"--bl_ew_min",
default=0.0,
type=float,
help="minimum EW baseline component to include in calibration and outputs.",
)
sp.add_argument(
"--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
)
sp.add_argument(
"--select_ants",
default=None,
type=int,
nargs="+",
help="Antennas to select exclusively for calibration and modeling.",
)
sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
return ap
def fitting_argparser():
ap = input_output_parser()
sp = ap.add_argument_group("General Fitting Arguments.")
sp.add_argument(
"--tol",
type=float,
default=1e-14,
help="Stop gradient descent after cost function converges to within this value.",
)
sp.add_argument(
"--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
)
sp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
sp.add_argument("--verbose", default=False, action="store_true", help="lots of text ouputs.")
sp.add_argument(
"--use_min",
default=False,
action="store_true",
help="Use params for mimimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
)
sp.add_argument(
"--use_redundancy",
default=False,
action="store_true",
help="Model redundant visibilities with the same set of foreground parameters.",
)
sp.add_argument(
"--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
)
sp.add_argument(
"--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
)
sp.add_argument(
"--graph_mode",
default=False,
action="store_true",
help="Pre-compile computational graph before running gradient descent. Not reccomended for GPUs.",
)
sp.add_argument(
"--init_guesses_from_previous_time_step",
default=False,
action="store_true",
help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
)
sp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
sp.add_argument(
"--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
)
sp.add_argument(
"--skip_threshold",
type=float,
default=0.5,
help="Skip and flag time/polarization if more then this fractionf of data is flagged.",
)
sp.add_argument("--model_regularization", type=str, default="post_hoc")
sp.add_argument(
"--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
)
sp.add_argument(
"--use_model_snr_weights",
default=False,
action="store_true",
help="If True, weight contributions to MSE as proportional to SNR.",
)
sp.add_argument(
"--use_autocorrs_in_weights",
default=False,
action="store_true",
help="If True, use autocorrelations to derive relative SNR weights.",
)
return ap
def dpss_fit_argparser():
ap = fitting_argparser()
sp = ap.add_argument_group("DPSS Specific Fitting Arguments.")
sp.add_argument("--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes.")
sp.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
sp.add_argument(
"--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
)
return ap
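# Example (sketch): building the DPSS parser and parsing illustrative arguments.
# The file name and flag values below are placeholders, not taken from any real run.
#   ap = dpss_fit_argparser()
#   args = ap.parse_args(["--input_data_files", "obs.uvh5", "--horizon", "1.0"])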
| 39.659114 | 170 | 0.637591 | [
"MIT"
] | aewallwi/calamity | calamity/calibration.py | 77,018 | Python |
import os
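# Each value below is read from the environment. Illustrative (placeholder) values:
#   export DB_HOST="postgresql://user:password@localhost/flask_qa"
#   export SECRET_KEY="change-me"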
SQLALCHEMY_DATABASE_URI = os.environ.get('DB_HOST')
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False | 28.6 | 51 | 0.818182 | [
"MIT"
] | cojok/python_play_around | flask_qa/flask_qa/settings.py | 143 | Python |
"""
Script for processing image (Pre OCR)
"""
import cv2
import numpy as np
import sys
import os.path
if len(sys.argv) != 3:
print "%s input_file output_file" % (sys.argv[0])
sys.exit()
else:
input_file = sys.argv[1]
output_file = sys.argv[2]
if not os.path.isfile(input_file):
print "No such file '%s'" % input_file
sys.exit()
DEBUG = 0
# Determine pixel intensity
# Apparently human eyes register colors differently.
# TVs use this formula to determine
# pixel intensity = 0.30R + 0.59G + 0.11B
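# e.g. a pure white pixel (255, 255, 255) gives 0.30*255 + 0.59*255 + 0.11*255 = 255,
# while a pure black pixel gives 0.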
def ii(xx, yy):
global img, img_y, img_x
if yy >= img_y or xx >= img_x:
#print "pixel out of bounds ("+str(y)+","+str(x)+")"
return 0
pixel = img[yy][xx]
return 0.30 * pixel[2] + 0.59 * pixel[1] + 0.11 * pixel[0]
# A quick test to check whether the contour is
# a connected shape
def connected(contour):
first = contour[0][0]
last = contour[len(contour) - 1][0]
return abs(first[0] - last[0]) <= 1 and abs(first[1] - last[1]) <= 1
# Helper function to return a given contour
def c(index):
global contours
return contours[index]
# Count the number of real children
def count_children(index, h_, contour):
# No children
if h_[index][2] < 0:
return 0
else:
#If the first child is a contour we care about
# then count it, otherwise don't
if keep(c(h_[index][2])):
count = 1
else:
count = 0
# Also count all of the child's siblings and their children
count += count_siblings(h_[index][2], h_, contour, True)
return count
# Quick check to test if the contour is a child
def is_child(index, h_):
return get_parent(index, h_) > 0
# Get the first parent of the contour that we care about
def get_parent(index, h_):
parent = h_[index][3]
while not keep(c(parent)) and parent > 0:
parent = h_[parent][3]
return parent
# Count the number of relevant siblings of a contour
def count_siblings(index, h_, contour, inc_children=False):
# Include the children if necessary
if inc_children:
count = count_children(index, h_, contour)
else:
count = 0
# Look ahead
p_ = h_[index][0]
while p_ > 0:
if keep(c(p_)):
count += 1
if inc_children:
count += count_children(p_, h_, contour)
p_ = h_[p_][0]
# Look behind
n = h_[index][1]
while n > 0:
if keep(c(n)):
count += 1
if inc_children:
count += count_children(n, h_, contour)
n = h_[n][1]
return count
# Whether we care about this contour
def keep(contour):
return keep_box(contour) and connected(contour)
# Whether we should keep the containing box of this
# contour based on its shape
def keep_box(contour):
xx, yy, w_, h_ = cv2.boundingRect(contour)
# width and height need to be floats
w_ *= 1.0
h_ *= 1.0
    # Test its shape - if it's too oblong or tall it's
# probably not a real character
if w_ / h_ < 0.1 or w_ / h_ > 10:
if DEBUG:
print "\t Rejected because of shape: (" + str(xx) + "," + str(yy) + "," + str(w_) + "," + str(h_) + ")" + \
str(w_ / h_)
return False
# check size of the box
if ((w_ * h_) > ((img_x * img_y) / 5)) or ((w_ * h_) < 15):
if DEBUG:
print "\t Rejected because of size"
return False
return True
def include_box(index, h_, contour):
if DEBUG:
print str(index) + ":"
if is_child(index, h_):
print "\tIs a child"
print "\tparent " + str(get_parent(index, h_)) + " has " + str(
count_children(get_parent(index, h_), h_, contour)) + " children"
print "\thas " + str(count_children(index, h_, contour)) + " children"
if is_child(index, h_) and count_children(get_parent(index, h_), h_, contour) <= 2:
if DEBUG:
print "\t skipping: is an interior to a letter"
return False
if count_children(index, h_, contour) > 2:
if DEBUG:
print "\t skipping, is a container of letters"
return False
if DEBUG:
print "\t keeping"
return True
# Load the image
orig_img = cv2.imread(input_file)
# Add a border to the image for processing sake
img = cv2.copyMakeBorder(orig_img, 50, 50, 50, 50, cv2.BORDER_CONSTANT)
# Calculate the width and height of the image
img_y = len(img)
img_x = len(img[0])
if DEBUG:
print "Image is " + str(len(img)) + "x" + str(len(img[0]))
#Split out each channel
blue, green, red = cv2.split(img)
# Run canny edge detection on each channel
blue_edges = cv2.Canny(blue, 200, 250)
green_edges = cv2.Canny(green, 200, 250)
red_edges = cv2.Canny(red, 200, 250)
# Join edges back into image
edges = blue_edges | green_edges | red_edges
# Find the contours
contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
hierarchy = hierarchy[0]
if DEBUG:
processed = edges.copy()
rejected = edges.copy()
# These are the boxes that we are determining
keepers = []
# For each contour, find the bounding rectangle and decide
# if it's one we care about
for index_, contour_ in enumerate(contours):
if DEBUG:
print "Processing #%d" % index_
x, y, w, h = cv2.boundingRect(contour_)
# Check the contour and it's bounding box
if keep(contour_) and include_box(index_, hierarchy, contour_):
# It's a winner!
keepers.append([contour_, [x, y, w, h]])
if DEBUG:
cv2.rectangle(processed, (x, y), (x + w, y + h), (100, 100, 100), 1)
cv2.putText(processed, str(index_), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
else:
if DEBUG:
cv2.rectangle(rejected, (x, y), (x + w, y + h), (100, 100, 100), 1)
cv2.putText(rejected, str(index_), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
# Make a white copy of our image
new_image = edges.copy()
new_image.fill(255)
boxes = []
# For each box, find the foreground and background intensities
for index_, (contour_, box) in enumerate(keepers):
# Find the average intensity of the edge pixels to
# determine the foreground intensity
fg_int = 0.0
for p in contour_:
fg_int += ii(p[0][0], p[0][1])
fg_int /= len(contour_)
if DEBUG:
print "FG Intensity for #%d = %d" % (index_, fg_int)
# Find the intensity of three pixels going around the
# outside of each corner of the bounding box to determine
# the background intensity
x_, y_, width, height = box
bg_int = \
[
# bottom left corner 3 pixels
ii(x_ - 1, y_ - 1),
ii(x_ - 1, y_),
ii(x_, y_ - 1),
# bottom right corner 3 pixels
ii(x_ + width + 1, y_ - 1),
ii(x_ + width, y_ - 1),
ii(x_ + width + 1, y_),
# top left corner 3 pixels
ii(x_ - 1, y_ + height + 1),
ii(x_ - 1, y_ + height),
ii(x_, y_ + height + 1),
# top right corner 3 pixels
ii(x_ + width + 1, y_ + height + 1),
ii(x_ + width, y_ + height + 1),
ii(x_ + width + 1, y_ + height)
]
# Find the median of the background
# pixels determined above
bg_int = np.median(bg_int)
if DEBUG:
print "BG Intensity for #%d = %s" % (index_, repr(bg_int))
# Determine if the box should be inverted
if fg_int >= bg_int:
fg = 255
bg = 0
else:
fg = 0
bg = 255
# Loop through every pixel in the box and color the
# pixel accordingly
for x in range(x_, x_ + width):
for y in range(y_, y_ + height):
if y >= img_y or x >= img_x:
if DEBUG:
print "pixel out of bounds (%d,%d)" % (y, x)
continue
if ii(x, y) > fg_int:
new_image[y][x] = bg
else:
new_image[y][x] = fg
# blur a bit to improve ocr accuracy
new_image = cv2.blur(new_image, (2, 2))
cv2.imwrite(output_file, new_image)
if DEBUG:
cv2.imwrite('edges.png', edges)
cv2.imwrite('processed.png', processed)
cv2.imwrite('rejected.png', rejected)
| 27.549669 | 119 | 0.583534 | [
"MIT"
] | sheeshmohsin/insta_hack | panverification/panapp/process_image.py | 8,320 | Python |
from django.forms import (
CheckboxSelectMultiple,
EmailInput,
FileInput,
HiddenInput,
NumberInput,
PasswordInput,
Textarea,
TextInput,
URLInput,
)
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting, get_field_renderer, get_form_renderer, get_formset_renderer
from .exceptions import BootstrapError
from .text import text_value
from .utils import add_css_class, render_tag
FORM_GROUP_CLASS = "form-group"
def render_formset(formset, **kwargs):
"""Render a formset to a Bootstrap layout."""
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render()
def render_formset_errors(formset, **kwargs):
"""Render formset errors to a Bootstrap layout."""
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render_errors()
def render_form(form, **kwargs):
"""Render a form to a Bootstrap layout."""
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render()
def render_form_errors(form, type="all", **kwargs):
"""Render form errors to a Bootstrap layout."""
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render_errors(type)
def render_field(field, **kwargs):
"""Render a field to a Bootstrap layout."""
renderer_cls = get_field_renderer(**kwargs)
return renderer_cls(field, **kwargs).render()
def render_label(content, label_for=None, label_class=None, label_title=""):
"""Render a label with content."""
attrs = {}
if label_for:
attrs["for"] = label_for
if label_class:
attrs["class"] = label_class
if label_title:
attrs["title"] = label_title
return render_tag("label", attrs=attrs, content=content)
def render_button(
content,
button_type=None,
button_class="btn-primary",
size="",
href="",
name=None,
value=None,
title=None,
extra_classes="",
id="",
):
"""Render a button with content."""
attrs = {}
classes = add_css_class("btn", button_class)
size = text_value(size).lower().strip()
if size == "xs":
classes = add_css_class(classes, "btn-xs")
elif size == "sm" or size == "small":
classes = add_css_class(classes, "btn-sm")
elif size == "lg" or size == "large":
classes = add_css_class(classes, "btn-lg")
elif size == "md" or size == "medium":
pass
elif size:
raise BootstrapError(f'Parameter "size" should be "xs", "sm", "lg" or empty ("{size}" given).')
if button_type:
if button_type not in ("submit", "reset", "button", "link"):
raise BootstrapError(
f'Parameter "button_type" should be "submit", "reset", "button", "link" or empty ("{button_type}" given).'
)
if button_type != "link":
attrs["type"] = button_type
classes = add_css_class(classes, extra_classes)
attrs["class"] = classes
if href:
tag = "a"
if button_type and button_type != "link":
raise BootstrapError(f'Button of type "{button_type}" is not allowed a "href" parameter.')
attrs["href"] = href
# Specify role for link with button appearance
attrs.setdefault("role", "button")
else:
tag = "button"
if id:
attrs["id"] = id
if name:
attrs["name"] = name
if value:
attrs["value"] = value
if title:
attrs["title"] = title
return render_tag(tag, attrs=attrs, content=mark_safe(content))
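# Example (sketch): render_button("Save", button_type="submit", size="lg") returns
# markup along the lines of
# '<button class="btn btn-primary btn-lg" type="submit">Save</button>'
# (the exact attribute ordering depends on render_tag).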
def render_field_and_label(field, label, field_class="", label_for=None, label_class="", layout="", **kwargs):
"""Render a field with its label."""
if layout == "horizontal":
if not label_class:
label_class = get_bootstrap_setting("horizontal_label_class")
if not field_class:
field_class = get_bootstrap_setting("horizontal_field_class")
if not label:
label = mark_safe(" ")
label_class = add_css_class(label_class, "control-label")
html = field
if field_class:
html = f'<div class="{field_class}">{html}</div>'
if label:
html = render_label(label, label_for=label_for, label_class=label_class) + html
return html
def render_form_group(content, css_class=FORM_GROUP_CLASS):
"""Render a Bootstrap form group."""
return f'<div class="{css_class}">{content}</div>'
def is_widget_with_placeholder(widget):
"""
Return whether this widget should have a placeholder.
    Only text, textarea, number, e-mail, url, password and derived inputs have placeholders.
"""
return isinstance(widget, (TextInput, Textarea, NumberInput, EmailInput, URLInput, PasswordInput))
| 31.431373 | 123 | 0.651279 | [
"BSD-3-Clause"
] | Natureshadow/django-bootstrap4 | src/bootstrap4/forms.py | 4,809 | Python |
"""
Revision ID: 0304a_merge
Revises: 0304_remove_org_to_service, 0303a_merge
Create Date: 2019-07-29 16:18:27.467361
"""
# revision identifiers, used by Alembic.
revision = "0304a_merge"
down_revision = ("0304_remove_org_to_service", "0303a_merge")
branch_labels = None
import sqlalchemy as sa
from alembic import op
def upgrade():
pass
def downgrade():
pass
| 15.666667 | 61 | 0.75 | [
"MIT"
] | cds-snc/notification-api | migrations/versions/0304a_merge.py | 376 | Python |
import argparse
import os.path
from os import walk
import sys
import xml.etree.ElementTree
from lib.config import get_env_var
from lib.transifex import (pull_source_files_from_transifex, should_use_transifex,
pull_xtb_without_transifex, combine_override_xtb_into_original)
from lib.grd_string_replacements import get_override_file_path
PRESEARCH_SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def parse_args():
    parser = argparse.ArgumentParser(description='Pull strings from Transifex')
parser.add_argument('--source_string_path', nargs=1)
return parser.parse_args()
def check_args():
transifex_info = (get_env_var('TRANSIFEX_USERNAME') and
get_env_var('TRANSIFEX_PASSWORD') or
get_env_var('TRANSIFEX_API_KEY'))
message = 'TRANSIFEX_USERNAME and TRANSIFEX_PASSWORD or '\
'TRANSIFEX_API_KEY must be set'
assert transifex_info, message
def main():
args = parse_args()
check_args()
source_string_path = os.path.join(PRESEARCH_SOURCE_ROOT, args.source_string_path[0])
filename = os.path.basename(source_string_path).split('.')[0]
if should_use_transifex(source_string_path, filename):
print('Transifex: ', source_string_path)
pull_source_files_from_transifex(source_string_path, filename)
else:
print('Local: ', source_string_path)
override_path = get_override_file_path(source_string_path)
print('Transifex override: ', override_path)
override_filename = os.path.basename(override_path).split('.')[0]
override_exists = os.path.exists(override_path)
if override_exists:
pull_source_files_from_transifex(override_path, override_filename)
pull_xtb_without_transifex(source_string_path, PRESEARCH_SOURCE_ROOT)
if override_exists:
combine_override_xtb_into_original(source_string_path)
if __name__ == '__main__':
sys.exit(main())
| 38.403846 | 90 | 0.732098 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | mamylinx/presearch-core | script/pull-l10n.py | 1,997 | Python |
# Copyright (c) 2019, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pynq
from pynq import pmbus
from pynq import Device
import numpy as np
import pandas as pd
import time
import argparse
# Set up data acquisition using PYNQ's PMBus API
def setup_power_recording():
rails = pmbus.get_xrt_sysfs_rails()
#We create a recorder monitoring the three rails that have power measurement on Alveo.
#Total board power is obtained by summing together the PCI Express and Auxilliary 12V rails.
#While some current is also drawn over the PCIe 5V rail this is negligible compared to the 12V rails and isn't recorded.
#We also measure the VCC_INT power which is the primary supply to the FPGA.
recorder = pmbus.DataRecorder(rails["12v_aux"].power,
rails["12v_pex"].power,
rails["vccint"].power)
return recorder
# ## Synthetic Throughput Test
# We execute inference of a configurable-size batch of images, without data movement. We measure the latency, throughput, and power
def benchmark_synthetic(bs, nreps):
ibuf = pynq.allocate((bs,3,224,224), dtype=np.int8, target=ol.bank0)
obuf = pynq.allocate((bs,5), dtype=np.uint32, target=ol.bank0)
# Start power monitoring
pwr_rec = setup_power_recording()
pwr_rec.record(0.1)
total_duration = time.monotonic()
for i in range(nreps):
accelerator.call(ibuf, obuf, fcbuf, bs)
total_duration = time.monotonic() - total_duration
# Stop the power monitoring
pwr_rec.stop()
latency = total_duration/nreps
fps = int((nreps/total_duration)*bs)
# Aggregate board/fpga power into a Pandas dataframe
f = pwr_rec.frame
powers = pd.DataFrame(index=f.index)
powers['board_power'] = f['12v_aux_power'] + f['12v_pex_power']
powers['fpga_power'] = f['vccint_power']
return fps, latency, powers
if __name__== "__main__":
parser = argparse.ArgumentParser(description='ResNet50 inference with FINN and PYNQ on Alveo')
parser.add_argument('--xclbin', type=str, default='resnet50.xclbin', help='Accelerator image file (xclbin)')
parser.add_argument('--fcweights', type=str, default='fcweights.csv', help='FC weights file (CSV)')
parser.add_argument('--shell', type=str, default='xilinx_u250_xdma_201830_2', help='Name of compatible shell')
parser.add_argument('--bs', type=int, default=1, help='Batch size (images processed per accelerator invocation)')
parser.add_argument('--reps',type=int, default=100, help='Number of batches to run')
args = parser.parse_args()
# discover a compatible shell if there are multiple
devices = Device.devices
if len(devices) > 1:
for i in range(len(devices)):
print("{}) {}".format(i, devices[i].name))
if devices[i].name == args.shell:
print("Compatible shell found, using device",i)
Device.active_device = devices[i]
break
ol=pynq.Overlay(args.xclbin)
accelerator=ol.resnet50_1
#allocate a buffer for FC weights, targeting the Alveo DDR Bank 0
fcbuf = pynq.allocate((1000,2048), dtype=np.int8, target=ol.bank0)
# Load the weight from a CSV file and push them to the accelerator buffer:
fcweights = np.genfromtxt(args.fcweights, delimiter=',', dtype=np.int8)
#csv reader erroneously adds one extra element to the end, so remove, then reshape
fcweights = fcweights[:-1].reshape(1000,2048)
fcbuf[:] = fcweights
#Move the data to the Alveo DDR
fcbuf.sync_to_device()
fps, latency, power = benchmark_synthetic(args.bs,args.reps)
print("Throughput:",fps,"FPS")
print("Latency:",round(latency*1000,2),"ms")
print("FPGA Power:",round(power.mean()['fpga_power'],2),"Watts")
print("Board Power:",round(power.mean()['board_power'],2),"Watts")
| 43.322581 | 131 | 0.710164 | [
"BSD-3-Clause"
] | Tobi-Alonso/ResNet50-PYNQ | host/synth_bench_power.py | 5,372 | Python |
def get_workout(day):
if day == 'Monday':
return 'Chest+biceps'
elif day == 'Tuesday':
return 'Back+triceps'
elif day == 'Wednesday':
return 'Core'
elif day == 'Thursday':
return 'Legs'
elif day == 'Friday':
return 'Shoulders'
elif day in ('Saturday', 'Sunday'):
return 'Rest'
raise ValueError('Not a day')
# use a dict to sort it out
workouts = {
'Monday': 'Chest+biceps',
'Tuesday': 'Back+triceps',
'Wednesday': 'Core',
'Thursday': 'Legs',
'Friday': 'Shoulders',
'Saturday': 'Rest',
'Sunday': 'Rest'
}
# one other way
days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.split()
routines = 'Chest+biceps Back+triceps Core Legs Shoulders Rest Rest'.split()
workouts2 = dict(zip(days, routines))
print(workouts2)
def get_workout(day):
    routine = workouts.get(day)
if routine is None:
raise ValueError('Not a day')
return routine
print(get_workout('Monday')) | 25 | 76 | 0.618 | [
"MIT"
] | pruty20/100daysofcode-with-python-course | days/34-36-refactoring/refactoring_yo.py | 1,000 | Python |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
VERSION = "0.1.0"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='anf-preview',
version=VERSION,
description='Provides a preview for upcoming Azure NetApp Files (ANF) features.',
long_description='An Azure CLI Extension for Azure NetApp Files (ANF) preview features.',
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli-extensions/tree/master/src/anf-preview',
classifiers=CLASSIFIERS,
packages=find_packages(exclude=["tests"]),
package_data={'azext_anf_preview': ['azext_metadata.json']},
install_requires=DEPENDENCIES
)
| 35.729167 | 94 | 0.609329 | [
"MIT"
] | XiangyuL-Microsoft/azure-cli-extensions | src/anf-preview/setup.py | 1,715 | Python |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
MAX_SIZE_FOR_A_LUN = '17555678822400'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
def __init__(self, **kwargs):
host = kwargs['hostname']
username = kwargs['username']
password = kwargs['password']
api_trace_pattern = kwargs['api_trace_pattern']
self.connection = netapp_api.NaServer(
host=host,
transport_type=kwargs['transport_type'],
port=kwargs['port'],
username=username,
password=password,
api_trace_pattern=api_trace_pattern)
self.ssh_client = self._init_ssh_client(host, username, password)
def _init_ssh_client(self, host, username, password):
return netapp_api.SSHUtil(
host=host,
username=username,
password=password)
def _init_features(self):
"""Set up the repository of available Data ONTAP features."""
self.features = na_utils.Features()
def get_ontap_version(self, cached=True):
"""Gets the ONTAP version."""
if cached:
return self.connection.get_ontap_version()
ontap_version = netapp_api.NaElement("system-get-version")
result = self.connection.invoke_successfully(ontap_version, True)
version_tuple = result.get_child_by_name(
'version-tuple') or netapp_api.NaElement('none')
system_version_tuple = version_tuple.get_child_by_name(
'system-version-tuple') or netapp_api.NaElement('none')
generation = system_version_tuple.get_child_content("generation")
major = system_version_tuple.get_child_content("major")
return '%(generation)s.%(major)s' % {
'generation': generation,
'major': major}
def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return major, minor
def _strip_xml_namespace(self, string):
if string.startswith('{') and '}' in string:
return string.split('}', 1)[1]
return string
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
qos_policy_group_name=None):
"""Issues API request for creating LUN on volume."""
path = '/vol/%s/%s' % (volume_name, lun_name)
space_reservation = metadata['SpaceReserved']
initial_size = size
ontap_version = self.get_ontap_version()
# On older ONTAP versions the extend size is limited to its
# geometry on max_resize_size. In order to remove this
# limitation we create the LUN with its maximum possible size
# and then shrink to the requested size.
if ontap_version < '9.5':
initial_size = MAX_SIZE_FOR_A_LUN
# In order to create a LUN with its maximum size (16TB),
# the space_reservation needs to be disabled
space_reservation = 'false'
params = {'path': path, 'size': str(initial_size),
'ostype': metadata['OsType'],
'space-reservation-enabled': space_reservation}
version = self.get_ontapi_version()
if version >= (1, 110):
params['use-exact-size'] = 'true'
lun_create = netapp_api.NaElement.create_node_with_children(
'lun-create-by-size',
**params)
if qos_policy_group_name:
lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
try:
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
LOG.error("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s",
{'lun_name': lun_name,
'volume_name': volume_name,
'ex': ex})
if ontap_version < '9.5':
self.do_direct_resize(path, six.text_type(size))
if metadata['SpaceReserved'] == 'true':
self.set_lun_space_reservation(path, True)
def set_lun_space_reservation(self, path, flag):
"""Sets the LUN space reservation on ONTAP."""
lun_modify_space_reservation = (
netapp_api.NaElement.create_node_with_children(
'lun-set-space-reservation-info', **{
'path': path,
'enable': str(flag)}))
self.connection.invoke_successfully(lun_modify_space_reservation, True)
def destroy_lun(self, path, force=True):
"""Destroys the LUN at the path."""
lun_destroy = netapp_api.NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s", seg[-1])
def map_lun(self, path, igroup_name, lun_id=None):
"""Maps LUN to the initiator and returns LUN id assigned."""
lun_map = netapp_api.NaElement.create_node_with_children(
'lun-map', **{'path': path,
'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
result = self.connection.invoke_successfully(lun_map, True)
return result.get_child_content('lun-id-assigned')
except netapp_api.NaApiError as e:
code = e.code
message = e.message
LOG.warning('Error mapping LUN. Code :%(code)s, Message: '
'%(message)s', {'code': code, 'message': message})
raise
def unmap_lun(self, path, igroup_name):
"""Unmaps a LUN from given initiator."""
lun_unmap = netapp_api.NaElement.create_node_with_children(
'lun-unmap',
**{'path': path, 'initiator-group': igroup_name})
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
exc_info = sys.exc_info()
LOG.warning("Error unmapping LUN. Code :%(code)s, Message: "
"%(message)s", {'code': e.code,
'message': e.message})
# if the LUN is already unmapped
if e.code == '13115' or e.code == '9016':
pass
else:
six.reraise(*exc_info)
def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
"""Creates igroup with specified args."""
igroup_create = netapp_api.NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
self.connection.invoke_successfully(igroup_create, True)
def add_igroup_initiator(self, igroup, initiator):
"""Adds initiators to the specified igroup."""
igroup_add = netapp_api.NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.connection.invoke_successfully(igroup_add, True)
def do_direct_resize(self, path, new_size_bytes, force=True):
"""Resize the LUN."""
seg = path.split("/")
LOG.info("Resizing LUN %s directly to new size.", seg[-1])
lun_resize = netapp_api.NaElement.create_node_with_children(
'lun-resize',
**{'path': path,
'size': new_size_bytes})
if force:
lun_resize.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_resize, True)
def get_lun_geometry(self, path):
"""Gets the LUN geometry."""
geometry = {}
lun_geo = netapp_api.NaElement("lun-get-geometry")
lun_geo.add_new_child('path', path)
try:
result = self.connection.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content("size")
geometry['bytes_per_sector'] = result.get_child_content(
"bytes-per-sector")
geometry['sectors_per_track'] = result.get_child_content(
"sectors-per-track")
geometry['tracks_per_cylinder'] = result.get_child_content(
"tracks-per-cylinder")
geometry['cylinders'] = result.get_child_content("cylinders")
geometry['max_resize'] = result.get_child_content(
"max-resize-size")
except Exception as e:
LOG.error("LUN %(path)s geometry failed. Message - %(msg)s",
{'path': path, 'msg': six.text_type(e)})
return geometry
def get_volume_options(self, volume_name):
"""Get the value for the volume option."""
opts = []
vol_option_list = netapp_api.NaElement("volume-options-list-info")
vol_option_list.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name("options")
if options:
opts = options.get_children()
return opts
def move_lun(self, path, new_path):
"""Moves the LUN at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug("Moving LUN %(name)s to %(new_name)s.",
{'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = netapp_api.NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
self.connection.invoke_successfully(lun_move, True)
def get_iscsi_target_details(self):
"""Gets the iSCSI target portal details."""
raise NotImplementedError()
def get_fc_target_wwpns(self):
"""Gets the FC target details."""
raise NotImplementedError()
def get_iscsi_service_details(self):
"""Returns iscsi iqn."""
raise NotImplementedError()
def check_iscsi_initiator_exists(self, iqn):
"""Returns True if initiator exists."""
raise NotImplementedError()
def set_iscsi_chap_authentication(self, iqn, username, password):
"""Provides NetApp host's CHAP credentials to the backend."""
raise NotImplementedError()
def get_lun_list(self):
"""Gets the list of LUNs on filer."""
raise NotImplementedError()
def get_igroup_by_initiators(self, initiator_list):
"""Get igroups exactly matching a set of initiators."""
raise NotImplementedError()
def _has_luns_mapped_to_initiator(self, initiator):
"""Checks whether any LUNs are mapped to the given initiator."""
lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
lun_list_api.add_new_child('initiator', initiator)
result = self.connection.invoke_successfully(lun_list_api, True)
lun_maps_container = result.get_child_by_name(
'lun-maps') or netapp_api.NaElement('none')
return len(lun_maps_container.get_children()) > 0
def has_luns_mapped_to_initiators(self, initiator_list):
"""Checks whether any LUNs are mapped to the given initiator(s)."""
for initiator in initiator_list:
if self._has_luns_mapped_to_initiator(initiator):
return True
return False
def get_lun_by_args(self, **args):
"""Retrieves LUNs with specified args."""
raise NotImplementedError()
def get_performance_counter_info(self, object_name, counter_name):
"""Gets info about one or more Data ONTAP performance counters."""
api_args = {'objectname': object_name}
result = self.connection.send_request('perf-object-counter-list-info',
api_args,
enable_tunneling=False)
counters = result.get_child_by_name(
'counters') or netapp_api.NaElement('None')
for counter in counters.get_children():
if counter.get_child_content('name') == counter_name:
labels = []
label_list = counter.get_child_by_name(
'labels') or netapp_api.NaElement('None')
for label in label_list.get_children():
labels.extend(label.get_content().split(','))
base_counter = counter.get_child_content('base-counter')
return {
'name': counter_name,
'labels': labels,
'base-counter': base_counter,
}
else:
raise exception.NotFound(_('Counter %s not found') % counter_name)
def delete_snapshot(self, volume_name, snapshot_name):
"""Deletes a volume snapshot."""
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.connection.send_request('snapshot-delete', api_args)
def create_cg_snapshot(self, volume_names, snapshot_name):
"""Creates a consistency group snapshot out of one or more flexvols.
ONTAP requires an invocation of cg-start to first fence off the
flexvols to be included in the snapshot. If cg-start returns
    success, a cg-commit must be executed to finalize the snapshot and
unfence the flexvols.
"""
cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
if not cg_id:
msg = _('Could not start consistency group snapshot %s.')
raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
self._commit_cg_snapshot(cg_id)
def _start_cg_snapshot(self, volume_names, snapshot_name):
snapshot_init = {
'snapshot': snapshot_name,
'timeout': 'relaxed',
'volumes': [
{'volume-name': volume_name} for volume_name in volume_names
],
}
result = self.connection.send_request('cg-start', snapshot_init)
return result.get_child_content('cg-id')
def _commit_cg_snapshot(self, cg_id):
snapshot_commit = {'cg-id': cg_id}
self.connection.send_request('cg-commit', snapshot_commit)
def get_snapshot(self, volume_name, snapshot_name):
"""Gets a single snapshot."""
raise NotImplementedError()
@utils.retry(exception.SnapshotIsBusy)
def wait_for_busy_snapshot(self, flexvol, snapshot_name):
"""Checks for and handles a busy snapshot.
If a snapshot is busy, for reasons other than cloning, an exception is
raised immediately. Otherwise, wait for a period of time for the clone
dependency to finish before giving up. If the snapshot is not busy then
no action is taken and the method exits.
"""
snapshot = self.get_snapshot(flexvol, snapshot_name)
if not snapshot['busy']:
LOG.debug("Backing consistency group snapshot %s available for "
"deletion.", snapshot_name)
return
else:
LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting "
"for volume clone dependency to clear.",
{"snap": snapshot_name, "vol": flexvol})
raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
def mark_snapshot_for_deletion(self, volume, snapshot_name):
"""Mark snapshot for deletion by renaming snapshot."""
return self.rename_snapshot(
volume, snapshot_name, DELETED_PREFIX + snapshot_name)
def rename_snapshot(self, volume, current_name, new_name):
"""Renames a snapshot."""
api_args = {
'volume': volume,
'current-name': current_name,
'new-name': new_name,
}
return self.connection.send_request('snapshot-rename', api_args)
| 40.928406 | 79 | 0.622503 | [
"Apache-2.0"
] | sapcc/cinder | cinder/volume/drivers/netapp/dataontap/client/client_base.py | 17,722 | Python |
from typing import Optional
from data_structures.singly_linked_list_node import SinglyLinkedListNode
def rotate_list(head: Optional[SinglyLinkedListNode], amount: int) -> Optional[SinglyLinkedListNode]:
if not head:
return None
if not head.next:
return head
current = head
number = 1
while current.next:
number += 1
current = current.next
    current.next = head  # temporarily join the tail back to the head
    amount %= number  # rotating by 0 or a multiple of the length is a no-op
    current = head
    for _ in range(number - amount - 1):
        current = current.next
new_head = current.next
current.next = None
return new_head
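# Example (sketch, assuming SinglyLinkedListNode exposes a value and a ``next`` pointer):
# rotating 1 -> 2 -> 3 right by 1 yields 3 -> 1 -> 2, and rotating by 0 or by a
# multiple of the list length returns the list unchanged.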
| 22.074074 | 101 | 0.667785 | [
"MIT"
] | ahcode0919/python-ds-algorithms | singly_linked_lists/rotate_list.py | 596 | Python |
#/*
# * Player - One Hell of a Robot Server
# * Copyright (C) 2004
# * Andrew Howard
# *
# *
# * This library is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Lesser General Public
# * License as published by the Free Software Foundation; either
# * version 2.1 of the License, or (at your option) any later version.
# *
# * This library is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# * Lesser General Public License for more details.
# *
# * You should have received a copy of the GNU Lesser General Public
# * License along with this library; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Desc: Test the camera
# Author: Andrew Howard
# Date: 15 Sep 2004
# CVS: $Id: test_camera.py 8114 2009-07-24 11:28:20Z thjc $
from playerc import *
def test_camera(client, index, context):
"""Basic test of the camera interface."""
camera = playerc_camera(client, index)
if camera.subscribe(PLAYERC_OPEN_MODE) != 0:
        raise Exception(playerc_error_str())
for i in range(10):
while 1:
id = client.read()
if id == camera.info.id:
break
if context:
print context,
print "camera: [%14.3f] [%d %d %d %d]" % \
(camera.info.datatime, camera.width, camera.height,
camera.depth, camera.image_size),
print
# Save the image
filename = 'camera_%03d.ppm' % i
print 'camera: saving [%s] (only works for RGB888)' % filename
test_camera_save(camera, filename);
camera.unsubscribe()
return
def test_camera_save(camera, filename):
"""Save a camera image. Assumes the image is RGB888"""
file = open(filename, 'w+');
assert(file)
# Write ppm header
file.write('P6\n%d %d\n%d\n' % (camera.width, camera.height, 255))
# TODO: ?
# Write image data
file.write(camera.image)
return
| 28.710526 | 78 | 0.62374 | [
"BSD-3-Clause"
] | parasol-ppl/PPL_utils | physicalrobots/player/client_libs/libplayerc/bindings/python/test/test_camera.py | 2,182 | Python |
from cwltool.main import main
from .util import get_data
def test_missing_cwl_version():
"""No cwlVersion in the workflow."""
assert main([get_data('tests/wf/missing_cwlVersion.cwl')]) == 1
def test_incorrect_cwl_version():
"""Using cwlVersion: v0.1 in the workflow."""
assert main([get_data('tests/wf/wrong_cwlVersion.cwl')]) == 1
| 27.076923 | 67 | 0.713068 | [
"Apache-2.0"
] | jayvdb/cwltool | tests/test_cwl_version.py | 352 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Launches PODs"""
import json
import math
import time
from datetime import datetime as dt
from typing import Optional, Tuple
import pendulum
import tenacity
from kubernetes import client, watch
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from requests.exceptions import BaseHTTPError
from airflow.exceptions import AirflowException
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.settings import pod_mutation_hook
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
def should_retry_start_pod(exception: Exception):
"""Check if an Exception indicates a transient error and warrants retrying"""
if isinstance(exception, ApiException):
return exception.status == 409
return False
class PodStatus:
"""Status of the PODs"""
PENDING = 'pending'
RUNNING = 'running'
FAILED = 'failed'
SUCCEEDED = 'succeeded'
class PodLauncher(LoggingMixin):
"""Launches PODS"""
def __init__(
self,
kube_client: client.CoreV1Api = None,
in_cluster: bool = True,
cluster_context: Optional[str] = None,
extract_xcom: bool = False,
):
"""
Creates the launcher.
:param kube_client: kubernetes client
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
:param extract_xcom: whether we should extract xcom
"""
super().__init__()
self._client = kube_client or get_kube_client(in_cluster=in_cluster, cluster_context=cluster_context)
self._watch = watch.Watch()
self.extract_xcom = extract_xcom
def run_pod_async(self, pod: V1Pod, **kwargs):
"""Runs POD asynchronously"""
pod_mutation_hook(pod)
sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug('Pod Creation Request: \n%s', json_pod)
try:
resp = self._client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug('Pod Creation Response: %s', resp)
except Exception as e:
self.log.exception('Exception when attempting to create Namespaced Pod: %s', json_pod)
raise e
return resp
def delete_pod(self, pod: V1Pod):
"""Deletes POD"""
try:
self._client.delete_namespaced_pod(
pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
@tenacity.retry(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_random_exponential(),
reraise=True,
retry=tenacity.retry_if_exception(should_retry_start_pod),
)
def start_pod(self, pod: V1Pod, startup_timeout: int = 120):
"""
Launches the pod synchronously and waits for completion.
        :param pod: pod spec that will be launched
:param startup_timeout: Timeout for startup of the pod (if pod is pending for too long, fails task)
:return:
"""
resp = self.run_pod_async(pod)
curr_time = dt.now()
if resp.status.start_time is None:
while self.pod_not_started(pod):
self.log.warning("Pod not yet started: %s", pod.metadata.name)
delta = dt.now() - curr_time
if delta.total_seconds() >= startup_timeout:
raise AirflowException("Pod took too long to start")
time.sleep(1)
def monitor_pod(self, pod: V1Pod, get_logs: bool) -> Tuple[State, V1Pod, Optional[str]]:
"""
Monitors a pod and returns the final state, pod and xcom result
:param pod: pod spec that will be monitored
:param get_logs: whether to read the logs locally
:return: Tuple[State, Optional[str]]
"""
if get_logs:
read_logs_since_sec = None
last_log_time = None
while True:
logs = self.read_pod_logs(pod, timestamps=True, since_seconds=read_logs_since_sec)
for line in logs:
timestamp, message = self.parse_log_line(line.decode('utf-8'))
last_log_time = pendulum.parse(timestamp)
self.log.info(message)
time.sleep(1)
if not self.base_container_is_running(pod):
break
self.log.warning('Pod %s log read interrupted', pod.metadata.name)
if last_log_time:
delta = pendulum.now() - last_log_time
# Prefer logs duplication rather than loss
read_logs_since_sec = math.ceil(delta.total_seconds())
result = None
if self.extract_xcom:
while self.base_container_is_running(pod):
self.log.info('Container %s has state %s', pod.metadata.name, State.RUNNING)
time.sleep(2)
result = self._extract_xcom(pod)
self.log.info(result)
result = json.loads(result)
while self.pod_is_running(pod):
self.log.info('Pod %s has state %s', pod.metadata.name, State.RUNNING)
time.sleep(2)
remote_pod = self.read_pod(pod)
return self._task_status(remote_pod), remote_pod, result
def parse_log_line(self, line: str) -> Tuple[str, str]:
"""
Parse K8s log line and returns the final state
:param line: k8s log line
:type line: str
:return: timestamp and log message
:rtype: Tuple[str, str]
"""
split_at = line.find(' ')
if split_at == -1:
raise Exception(f'Log not in "{{timestamp}} {{log}}" format. Got: {line}')
timestamp = line[:split_at]
message = line[split_at + 1 :].rstrip()
return timestamp, message
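    # Example (sketch): parse_log_line("2021-01-01T00:00:00Z starting base container")
    # returns ("2021-01-01T00:00:00Z", "starting base container").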
def _task_status(self, event):
self.log.info('Event: %s had an event of type %s', event.metadata.name, event.status.phase)
status = self.process_status(event.metadata.name, event.status.phase)
return status
def pod_not_started(self, pod: V1Pod):
"""Tests if pod has not started"""
state = self._task_status(self.read_pod(pod))
return state == State.QUEUED
def pod_is_running(self, pod: V1Pod):
"""Tests if pod is running"""
state = self._task_status(self.read_pod(pod))
return state not in (State.SUCCESS, State.FAILED)
def base_container_is_running(self, pod: V1Pod):
"""Tests if base container is running"""
event = self.read_pod(pod)
status = next(iter(filter(lambda s: s.name == 'base', event.status.container_statuses)), None)
if not status:
return False
return status.state.running is not None
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_logs(
self,
pod: V1Pod,
tail_lines: Optional[int] = None,
timestamps: bool = False,
since_seconds: Optional[int] = None,
):
"""Reads log from the POD"""
additional_kwargs = {}
if since_seconds:
additional_kwargs['since_seconds'] = since_seconds
if tail_lines:
additional_kwargs['tail_lines'] = tail_lines
try:
return self._client.read_namespaced_pod_log(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
container='base',
follow=True,
timestamps=timestamps,
_preload_content=False,
**additional_kwargs,
)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_events(self, pod):
"""Reads events from the POD"""
try:
return self._client.list_namespaced_event(
namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod(self, pod: V1Pod):
"""Read POD information"""
try:
return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
def _extract_xcom(self, pod: V1Pod):
resp = kubernetes_stream(
self._client.connect_get_namespaced_pod_exec,
pod.metadata.name,
pod.metadata.namespace,
container=PodDefaults.SIDECAR_CONTAINER_NAME,
command=['/bin/sh'],
stdin=True,
stdout=True,
stderr=True,
tty=False,
_preload_content=False,
)
try:
result = self._exec_pod_command(resp, f'cat {PodDefaults.XCOM_MOUNT_PATH}/return.json')
self._exec_pod_command(resp, 'kill -s SIGINT 1')
finally:
resp.close()
if result is None:
raise AirflowException(f'Failed to extract xcom from pod: {pod.metadata.name}')
return result
def _exec_pod_command(self, resp, command):
if resp.is_open():
self.log.info('Running command... %s\n', command)
resp.write_stdin(command + '\n')
while resp.is_open():
resp.update(timeout=1)
if resp.peek_stdout():
return resp.read_stdout()
if resp.peek_stderr():
self.log.info(resp.read_stderr())
break
return None
def process_status(self, job_id, status):
"""Process status information for the JOB"""
status = status.lower()
if status == PodStatus.PENDING:
return State.QUEUED
elif status == PodStatus.FAILED:
self.log.error('Event with job id %s Failed', job_id)
return State.FAILED
elif status == PodStatus.SUCCEEDED:
self.log.info('Event with job id %s Succeeded', job_id)
return State.SUCCESS
elif status == PodStatus.RUNNING:
return State.RUNNING
else:
self.log.error('Event: Invalid state %s on job %s', status, job_id)
return State.FAILED
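def _example_launch_and_monitor(launcher, pod):
    """Editor's sketch, not part of the original provider: one plausible way to
    drive the methods above. `launcher` is assumed to be an instance of the
    enclosing launcher class and `pod` a V1Pod built elsewhere; error handling
    is reduced to a bare minimum."""
    launcher.start_pod(pod, startup_timeout=120)
    try:
        final_state, remote_pod, xcom_result = launcher.monitor_pod(pod, get_logs=True)
    finally:
        launcher.delete_pod(pod)
    return final_state, xcom_result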
| 38.148387 | 109 | 0.624472 | [
"Apache-2.0"
] | kevin0120/airflow | airflow/providers/cncf/kubernetes/utils/pod_launcher.py | 11,826 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-08-25 10:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=20, verbose_name='名称')),
('logo', models.ImageField(upload_to='', verbose_name='Logo图片')),
('first_letter', models.CharField(max_length=1, verbose_name='品牌首字母')),
],
options={
'verbose_name': '品牌',
'verbose_name_plural': '品牌',
'db_table': 'tb_brand',
},
),
migrations.CreateModel(
name='GoodsCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=10, verbose_name='名称')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subs', to='goods.GoodsCategory', verbose_name='父类别')),
],
options={
'verbose_name': '商品类别',
'verbose_name_plural': '商品类别',
'db_table': 'tb_goods_category',
},
),
migrations.CreateModel(
name='GoodsChannel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('url', models.CharField(max_length=50, verbose_name='频道页面链接')),
('sequence', models.IntegerField(verbose_name='组内顺序')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsCategory', verbose_name='顶级商品类别')),
],
options={
'verbose_name': '商品频道',
'verbose_name_plural': '商品频道',
'db_table': 'tb_goods_channel',
},
),
migrations.CreateModel(
name='GoodsChannelGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=20, verbose_name='频道组名')),
],
options={
'verbose_name': '商品频道组',
'verbose_name_plural': '商品频道组',
'db_table': 'tb_channel_group',
},
),
migrations.CreateModel(
name='SKU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('caption', models.CharField(max_length=100, verbose_name='副标题')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='单价')),
('cost_price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='进价')),
('market_price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='市场价')),
('stock', models.IntegerField(default=0, verbose_name='库存')),
('sales', models.IntegerField(default=0, verbose_name='销量')),
('comments', models.IntegerField(default=0, verbose_name='评价数')),
('is_launched', models.BooleanField(default=True, verbose_name='是否上架销售')),
('default_image', models.ImageField(blank=True, default='', max_length=200, null=True, upload_to='', verbose_name='默认图片')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.GoodsCategory', verbose_name='从属类别')),
],
options={
'verbose_name': '商品SKU',
'verbose_name_plural': '商品SKU',
'db_table': 'tb_sku',
},
),
migrations.CreateModel(
name='SKUImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('image', models.ImageField(upload_to='', verbose_name='图片')),
('sku', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.SKU', verbose_name='sku')),
],
options={
'verbose_name': 'SKU图片',
'verbose_name_plural': 'SKU图片',
'db_table': 'tb_sku_image',
},
),
migrations.CreateModel(
name='SKUSpecification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
],
options={
'verbose_name': 'SKU规格',
'verbose_name_plural': 'SKU规格',
'db_table': 'tb_sku_specification',
},
),
migrations.CreateModel(
name='SpecificationOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('value', models.CharField(max_length=20, verbose_name='选项值')),
],
options={
'verbose_name': '规格选项',
'verbose_name_plural': '规格选项',
'db_table': 'tb_specification_option',
},
),
migrations.CreateModel(
name='SPU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('sales', models.IntegerField(default=0, verbose_name='销量')),
('comments', models.IntegerField(default=0, verbose_name='评价数')),
('desc_detail', models.TextField(default='', verbose_name='详细介绍')),
('desc_pack', models.TextField(default='', verbose_name='包装信息')),
('desc_service', models.TextField(default='', verbose_name='售后服务')),
('brand', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.Brand', verbose_name='品牌')),
('category1', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='cat1_spu', to='goods.GoodsCategory', verbose_name='一级类别')),
('category2', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='cat2_spu', to='goods.GoodsCategory', verbose_name='二级类别')),
('category3', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='cat3_spu', to='goods.GoodsCategory', verbose_name='三级类别')),
],
options={
'verbose_name': '商品SPU',
'verbose_name_plural': '商品SPU',
'db_table': 'tb_spu',
},
),
migrations.CreateModel(
name='SPUSpecification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=20, verbose_name='规格名称')),
('spu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='specs', to='goods.SPU', verbose_name='商品SPU')),
],
options={
'verbose_name': '商品SPU规格',
'verbose_name_plural': '商品SPU规格',
'db_table': 'tb_spu_specification',
},
),
migrations.AddField(
model_name='specificationoption',
name='spec',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='options', to='goods.SPUSpecification', verbose_name='规格'),
),
migrations.AddField(
model_name='skuspecification',
name='option',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.SpecificationOption', verbose_name='规格值'),
),
migrations.AddField(
model_name='skuspecification',
name='sku',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='specs', to='goods.SKU', verbose_name='sku'),
),
migrations.AddField(
model_name='skuspecification',
name='spec',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.SPUSpecification', verbose_name='规格名称'),
),
migrations.AddField(
model_name='sku',
name='spu',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.SPU', verbose_name='商品'),
),
migrations.AddField(
model_name='goodschannel',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsChannelGroup', verbose_name='频道组名'),
),
]
| 52.516432 | 181 | 0.578759 | [
"MIT"
] | sxfang32/meiduo_29 | meiduo_mall/meiduo_mall/apps/goods/migrations/0001_initial.py | 11,722 | Python |
"""
Low-level wrapper for PortMidi library
Copied straight from Grant Yoshida's portmidizero, with slight
modifications.
"""
import sys
from ctypes import (CDLL, CFUNCTYPE, POINTER, Structure, c_char_p,
c_int, c_long, c_uint, c_void_p, cast,
create_string_buffer, byref)
import ctypes.util
dll_name = ''
if sys.platform == 'darwin':
dll_name = ctypes.util.find_library('libportmidi.dylib')
elif sys.platform in ('win32', 'cygwin'):
dll_name = 'portmidi.dll'
else:
dll_name = 'libportmidi.so'
lib = CDLL(dll_name)
null = None
false = 0
true = 1
# portmidi.h
# From portmidi.h
PM_HOST_ERROR_MSG_LEN = 256
def get_host_error_message():
"""Return host error message."""
buf = create_string_buffer(PM_HOST_ERROR_MSG_LEN)
lib.Pm_GetHostErrorText(buf, PM_HOST_ERROR_MSG_LEN)
return buf.raw.decode().rstrip('\0')
PmError = c_int
# PmError enum
pmNoError = 0
pmHostError = -10000
pmInvalidDeviceId = -9999
pmInsufficientMemory = -9989
pmBufferTooSmall = -9979
pmBufferOverflow = -9969
pmBadPtr = -9959
pmBadData = -9994
pmInternalError = -9993
pmBufferMaxSize = -9992
lib.Pm_Initialize.restype = PmError
lib.Pm_Terminate.restype = PmError
PmDeviceID = c_int
PortMidiStreamPtr = c_void_p
PmStreamPtr = PortMidiStreamPtr
PortMidiStreamPtrPtr = POINTER(PortMidiStreamPtr)
lib.Pm_HasHostError.restype = c_int
lib.Pm_HasHostError.argtypes = [PortMidiStreamPtr]
lib.Pm_GetErrorText.restype = c_char_p
lib.Pm_GetErrorText.argtypes = [PmError]
lib.Pm_GetHostErrorText.argtypes = [c_char_p, c_uint]
pmNoDevice = -1
class PmDeviceInfo(Structure):
_fields_ = [("structVersion", c_int),
("interface", c_char_p),
("name", c_char_p),
("is_input", c_int),
("is_output", c_int),
("opened", c_int)]
PmDeviceInfoPtr = POINTER(PmDeviceInfo)
lib.Pm_CountDevices.restype = c_int
lib.Pm_GetDefaultOutputDeviceID.restype = PmDeviceID
lib.Pm_GetDefaultInputDeviceID.restype = PmDeviceID
PmTimestamp = c_long
PmTimeProcPtr = CFUNCTYPE(PmTimestamp, c_void_p)
NullTimeProcPtr = cast(null, PmTimeProcPtr)
# PmBefore is not defined
lib.Pm_GetDeviceInfo.argtypes = [PmDeviceID]
lib.Pm_GetDeviceInfo.restype = PmDeviceInfoPtr
lib.Pm_OpenInput.restype = PmError
lib.Pm_OpenInput.argtypes = [PortMidiStreamPtrPtr,
PmDeviceID,
c_void_p,
c_long,
PmTimeProcPtr,
c_void_p]
lib.Pm_OpenOutput.restype = PmError
lib.Pm_OpenOutput.argtypes = [PortMidiStreamPtrPtr,
PmDeviceID,
c_void_p,
c_long,
PmTimeProcPtr,
c_void_p,
c_long]
lib.Pm_SetFilter.restype = PmError
lib.Pm_SetFilter.argtypes = [PortMidiStreamPtr, c_long]
lib.Pm_SetChannelMask.restype = PmError
lib.Pm_SetChannelMask.argtypes = [PortMidiStreamPtr, c_int]
lib.Pm_Abort.restype = PmError
lib.Pm_Abort.argtypes = [PortMidiStreamPtr]
lib.Pm_Close.restype = PmError
lib.Pm_Close.argtypes = [PortMidiStreamPtr]
PmMessage = c_long
class PmEvent(Structure):
_fields_ = [("message", PmMessage),
("timestamp", PmTimestamp)]
PmEventPtr = POINTER(PmEvent)
lib.Pm_Read.restype = PmError
lib.Pm_Read.argtypes = [PortMidiStreamPtr, PmEventPtr, c_long]
lib.Pm_Poll.restype = PmError
lib.Pm_Poll.argtypes = [PortMidiStreamPtr]
lib.Pm_Write.restype = PmError
lib.Pm_Write.argtypes = [PortMidiStreamPtr, PmEventPtr, c_long]
lib.Pm_WriteShort.restype = PmError
lib.Pm_WriteShort.argtypes = [PortMidiStreamPtr, PmTimestamp, c_long]
lib.Pm_WriteSysEx.restype = PmError
lib.Pm_WriteSysEx.argtypes = [PortMidiStreamPtr, PmTimestamp, c_char_p]
# porttime.h
# PtError enum
PtError = c_int
ptNoError = 0
ptHostError = -10000
ptAlreadyStarted = -9999
ptAlreadyStopped = -9998
ptInsufficientMemory = -9997
PtTimestamp = c_long
PtCallback = CFUNCTYPE(PmTimestamp, c_void_p)
lib.Pt_Start.restype = PtError
lib.Pt_Start.argtypes = [c_int, PtCallback, c_void_p]
lib.Pt_Stop.restype = PtError
lib.Pt_Started.restype = c_int
lib.Pt_Time.restype = PtTimestamp
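def _example_list_devices():
    """Editor's sketch, not part of the original module: enumerate MIDI devices
    using only the raw bindings declared above."""
    lib.Pm_Initialize()
    try:
        for i in range(lib.Pm_CountDevices()):
            info = lib.Pm_GetDeviceInfo(i).contents
            direction = 'input' if info.is_input else 'output'
            print(i, info.interface, info.name, direction)
    finally:
        lib.Pm_Terminate()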
| 25.76506 | 71 | 0.697919 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | EnjoyLifeFund/macHighSierra-py36-pkgs | mido/backends/portmidi_init.py | 4,277 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from niftynet.layer.base_layer import TrainableLayer
from niftynet.layer.convolution import ConvolutionalLayer as Conv
from niftynet.layer.downsample import DownSampleLayer as Down
from niftynet.layer.residual_unit import ResidualUnit as ResUnit
class DownBlock(TrainableLayer):
def __init__(self,
n_output_chns=4,
kernel_size=3,
downsample_kernel_size=2,
downsample_stride=2,
acti_func='relu',
w_initializer=None,
w_regularizer=None,
type_string='bn_acti_conv',
name='res-downsample'):
super(TrainableLayer, self).__init__(name=name)
self.n_output_chns = n_output_chns
self.kernel_size = kernel_size
self.downsample_kernel_size = downsample_kernel_size
self.downsample_stride = downsample_stride
self.acti_func = acti_func
self.conv_param = {'w_initializer': w_initializer,
'w_regularizer': w_regularizer}
self.type_string = type_string
def layer_op(self, inputs, is_training=True):
"""
Consists of::
(inputs)--conv_0-o-conv_1--conv_2-+-(conv_res)--down_sample--
| |
o----------------o
        conv_0 and conv_res are also returned for feature forwarding purposes
"""
conv_0 = Conv(n_output_chns=self.n_output_chns,
kernel_size=self.kernel_size,
acti_func=self.acti_func,
with_bias=False, feature_normalization='batch',
**self.conv_param)(inputs, is_training)
conv_res = ResUnit(n_output_chns=self.n_output_chns,
kernel_size=self.kernel_size,
acti_func=self.acti_func,
type_string=self.type_string,
**self.conv_param)(conv_0, is_training)
conv_down = Down('Max',
kernel_size=self.downsample_kernel_size,
stride=self.downsample_stride)(conv_res)
return conv_down, conv_0, conv_res
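def _example_usage(images, is_training=True):
    """Editor's sketch, not part of the original layer: wiring a single DownBlock.
    `images` is assumed to be a NiftyNet image tensor, e.g. [batch, x, y, z, ch]."""
    block = DownBlock(n_output_chns=8, kernel_size=3, acti_func='relu')
    flow_down, skip_conv0, skip_res = block(images, is_training)
    return flow_down, skip_conv0, skip_res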
| 41.818182 | 73 | 0.582609 | [
"Apache-2.0"
] | BRAINSia/NiftyNet | niftynet/layer/downsample_res_block.py | 2,300 | Python |
#!/usr/bin/env python3
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2018/9/17
"""
desc:pass
"""
class __init__:
pass
if __name__ == '__main__':
pass | 10.555556 | 26 | 0.6 | [
"Apache-2.0"
] | Mryan2005/bot-sdk-python | dueros/directive/Base/__init__.py | 190 | Python |
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
from tensorflow.keras.layers.experimental.preprocessing import Resizing
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tf_utils.callbacks import schedule_fn2
from tf_utils.dogsCatsDataAdvanced import DOGSCATS
IMAGENET_SIZE = 224
IMAGENET_DEPTH = 3
IMAGENET_SHAPE = (IMAGENET_SIZE, IMAGENET_SIZE, IMAGENET_DEPTH)
def build_model(img_shape, num_classes) -> Model:
base_model = MobileNetV2(
include_top=False,
weights="imagenet",
input_shape=IMAGENET_SHAPE
)
num_layers = len(base_model.layers)
print(f"Number of layers in the base model: {num_layers}")
fine_tune_at = num_layers - 10
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
input_img = Input(shape=img_shape)
x = Rescaling(scale=2.0, offset=-1.0)(input_img)
x = Resizing(height=IMAGENET_SIZE, width=IMAGENET_SIZE)(x)
x = base_model(x)
x = GlobalAveragePooling2D()(x)
x = Dense(units=num_classes)(x)
y_pred = Activation("softmax")(x)
model = Model(
inputs=[input_img],
outputs=[y_pred]
)
model.summary()
return model
if __name__ == "__main__":
"""
Best model from chapter 9_2: 0.9034 accuracy
Best model from chapter 9_7: 0.9614 accuracy
"""
data = DOGSCATS()
train_dataset = data.get_train_set()
val_dataset = data.get_val_set()
test_dataset = data.get_test_set()
img_shape = data.img_shape
num_classes = data.num_classes
# Global params
epochs = 100
model = build_model(
img_shape,
num_classes
)
opt = Adam(learning_rate=5e-4)
model.compile(
loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"]
)
lrs_callback = LearningRateScheduler(
schedule=schedule_fn2,
verbose=1
)
es_callback = EarlyStopping(
monitor="val_loss",
patience=30,
verbose=1,
restore_best_weights=True
)
model.fit(
train_dataset,
verbose=1,
epochs=epochs,
callbacks=[lrs_callback, es_callback],
validation_data=val_dataset,
)
scores = model.evaluate(
val_dataset,
verbose=0
)
print(f"Scores: {scores}")
| 25.259259 | 72 | 0.693182 | [
"MIT"
] | thisisjako/UdemyTF | Chapter9_AdvancedDL/Chapter9_7_AdvancedTechniques2/dogsCatsTransferLearning.py | 2,728 | Python |
# IDLSave - a python module to read IDL 'save' files
# Copyright (c) 2010 Thomas P. Robitaille
# Many thanks to Craig Markwardt for publishing the Unofficial Format
# Specification for IDL .sav files, without which this Python module would not
# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).
# This code was developed with permission from ITT Visual Information
# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,
# Inc. for their Interactive Data Language software.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__all__ = ['readsav']
import struct
import numpy as np
from numpy.compat import asstr
import tempfile
import zlib
import warnings
# Define the different data types that can be found in an IDL save file
DTYPE_DICT = {1: '>u1',
2: '>i2',
3: '>i4',
4: '>f4',
5: '>f8',
6: '>c8',
7: '|O',
8: '|O',
9: '>c16',
10: '|O',
11: '|O',
12: '>u2',
13: '>u4',
14: '>i8',
15: '>u8'}
# Define the different record types that can be found in an IDL save file
RECTYPE_DICT = {0: "START_MARKER",
1: "COMMON_VARIABLE",
2: "VARIABLE",
3: "SYSTEM_VARIABLE",
6: "END_MARKER",
10: "TIMESTAMP",
12: "COMPILED",
13: "IDENTIFICATION",
14: "VERSION",
15: "HEAP_HEADER",
16: "HEAP_DATA",
17: "PROMOTE64",
19: "NOTICE",
20: "DESCRIPTION"}
# Define a dictionary to contain structure definitions
STRUCT_DICT = {}
def _align_32(f):
'''Align to the next 32-bit position in a file'''
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _skip_bytes(f, n):
'''Skip `n` bytes'''
f.read(n)
return
def _read_bytes(f, n):
'''Read the next `n` bytes'''
return f.read(n)
def _read_byte(f):
'''Read a single byte'''
return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])
def _read_long(f):
'''Read a signed 32-bit integer'''
return np.int32(struct.unpack('>l', f.read(4))[0])
def _read_int16(f):
'''Read a signed 16-bit integer'''
return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])
def _read_int32(f):
'''Read a signed 32-bit integer'''
return np.int32(struct.unpack('>i', f.read(4))[0])
def _read_int64(f):
'''Read a signed 64-bit integer'''
return np.int64(struct.unpack('>q', f.read(8))[0])
def _read_uint16(f):
'''Read an unsigned 16-bit integer'''
return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])
def _read_uint32(f):
'''Read an unsigned 32-bit integer'''
return np.uint32(struct.unpack('>I', f.read(4))[0])
def _read_uint64(f):
'''Read an unsigned 64-bit integer'''
return np.uint64(struct.unpack('>Q', f.read(8))[0])
def _read_float32(f):
'''Read a 32-bit float'''
return np.float32(struct.unpack('>f', f.read(4))[0])
def _read_float64(f):
'''Read a 64-bit float'''
return np.float64(struct.unpack('>d', f.read(8))[0])
class Pointer(object):
'''Class used to define pointers'''
def __init__(self, index):
self.index = index
return
class ObjectPointer(Pointer):
'''Class used to define object pointers'''
pass
def _read_string(f):
'''Read a string'''
length = _read_long(f)
if length > 0:
chars = _read_bytes(f, length)
_align_32(f)
chars = asstr(chars)
else:
chars = ''
return chars
def _read_string_data(f):
'''Read a data string (length is specified twice)'''
length = _read_long(f)
if length > 0:
length = _read_long(f)
string_data = _read_bytes(f, length)
_align_32(f)
else:
string_data = ''
return string_data
def _read_data(f, dtype):
'''Read a variable with a specified data type'''
if dtype == 1:
if _read_int32(f) != 1:
raise Exception("Error occurred while reading byte variable")
return _read_byte(f)
elif dtype == 2:
return _read_int16(f)
elif dtype == 3:
return _read_int32(f)
elif dtype == 4:
return _read_float32(f)
elif dtype == 5:
return _read_float64(f)
elif dtype == 6:
real = _read_float32(f)
imag = _read_float32(f)
return np.complex64(real + imag * 1j)
elif dtype == 7:
return _read_string_data(f)
elif dtype == 8:
raise Exception("Should not be here - please report this")
elif dtype == 9:
real = _read_float64(f)
imag = _read_float64(f)
return np.complex128(real + imag * 1j)
elif dtype == 10:
return Pointer(_read_int32(f))
elif dtype == 11:
return ObjectPointer(_read_int32(f))
elif dtype == 12:
return _read_uint16(f)
elif dtype == 13:
return _read_uint32(f)
elif dtype == 14:
return _read_int64(f)
elif dtype == 15:
return _read_uint64(f)
else:
raise Exception("Unknown IDL type: %i - please report this" % dtype)
def _read_structure(f, array_desc, struct_desc):
'''
Read a structure, with the array and structure descriptors given as
`array_desc` and `structure_desc` respectively.
'''
nrows = array_desc['nelements']
columns = struct_desc['tagtable']
dtype = []
for col in columns:
if col['structure'] or col['array']:
dtype.append(((col['name'].lower(), col['name']), np.object_))
else:
if col['typecode'] in DTYPE_DICT:
dtype.append(((col['name'].lower(), col['name']),
DTYPE_DICT[col['typecode']]))
else:
raise Exception("Variable type %i not implemented" %
col['typecode'])
structure = np.recarray((nrows, ), dtype=dtype)
for i in range(nrows):
for col in columns:
dtype = col['typecode']
if col['structure']:
structure[col['name']][i] = _read_structure(f,
struct_desc['arrtable'][col['name']],
struct_desc['structtable'][col['name']])
elif col['array']:
structure[col['name']][i] = _read_array(f, dtype,
struct_desc['arrtable'][col['name']])
else:
structure[col['name']][i] = _read_data(f, dtype)
# Reshape structure if needed
if array_desc['ndims'] > 1:
dims = array_desc['dims'][:int(array_desc['ndims'])]
dims.reverse()
structure = structure.reshape(dims)
return structure
def _read_array(f, typecode, array_desc):
'''
Read an array of type `typecode`, with the array descriptor given as
`array_desc`.
'''
if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:
if typecode == 1:
nbytes = _read_int32(f)
if nbytes != array_desc['nbytes']:
warnings.warn("Not able to verify number of bytes from header")
# Read bytes as numpy array
array = np.frombuffer(f.read(array_desc['nbytes']),
dtype=DTYPE_DICT[typecode])
elif typecode in [2, 12]:
# These are 2 byte types, need to skip every two as they are not packed
array = np.frombuffer(f.read(array_desc['nbytes']*2),
dtype=DTYPE_DICT[typecode])[1::2]
else:
# Read bytes into list
array = []
for i in range(array_desc['nelements']):
dtype = typecode
data = _read_data(f, dtype)
array.append(data)
array = np.array(array, dtype=np.object_)
# Reshape array if needed
if array_desc['ndims'] > 1:
dims = array_desc['dims'][:int(array_desc['ndims'])]
dims.reverse()
array = array.reshape(dims)
# Go to next alignment position
_align_32(f)
return array
def _read_record(f):
'''Function to read in a full record'''
record = {'rectype': _read_long(f)}
nextrec = _read_uint32(f)
nextrec += _read_uint32(f) * 2**32
_skip_bytes(f, 4)
if not record['rectype'] in RECTYPE_DICT:
raise Exception("Unknown RECTYPE: %i" % record['rectype'])
record['rectype'] = RECTYPE_DICT[record['rectype']]
if record['rectype'] in ["VARIABLE", "HEAP_DATA"]:
if record['rectype'] == "VARIABLE":
record['varname'] = _read_string(f)
else:
record['heap_index'] = _read_long(f)
_skip_bytes(f, 4)
rectypedesc = _read_typedesc(f)
if rectypedesc['typecode'] == 0:
if nextrec == f.tell():
record['data'] = None # Indicates NULL value
else:
raise ValueError("Unexpected type code: 0")
else:
varstart = _read_long(f)
if varstart != 7:
raise Exception("VARSTART is not 7")
if rectypedesc['structure']:
record['data'] = _read_structure(f, rectypedesc['array_desc'],
rectypedesc['struct_desc'])
elif rectypedesc['array']:
record['data'] = _read_array(f, rectypedesc['typecode'],
rectypedesc['array_desc'])
else:
dtype = rectypedesc['typecode']
record['data'] = _read_data(f, dtype)
elif record['rectype'] == "TIMESTAMP":
_skip_bytes(f, 4*256)
record['date'] = _read_string(f)
record['user'] = _read_string(f)
record['host'] = _read_string(f)
elif record['rectype'] == "VERSION":
record['format'] = _read_long(f)
record['arch'] = _read_string(f)
record['os'] = _read_string(f)
record['release'] = _read_string(f)
elif record['rectype'] == "IDENTIFICATON":
record['author'] = _read_string(f)
record['title'] = _read_string(f)
record['idcode'] = _read_string(f)
elif record['rectype'] == "NOTICE":
record['notice'] = _read_string(f)
elif record['rectype'] == "DESCRIPTION":
record['description'] = _read_string_data(f)
elif record['rectype'] == "HEAP_HEADER":
record['nvalues'] = _read_long(f)
record['indices'] = [_read_long(f) for _ in range(record['nvalues'])]
elif record['rectype'] == "COMMONBLOCK":
record['nvars'] = _read_long(f)
record['name'] = _read_string(f)
record['varnames'] = [_read_string(f) for _ in range(record['nvars'])]
elif record['rectype'] == "END_MARKER":
record['end'] = True
elif record['rectype'] == "UNKNOWN":
warnings.warn("Skipping UNKNOWN record")
elif record['rectype'] == "SYSTEM_VARIABLE":
warnings.warn("Skipping SYSTEM_VARIABLE record")
else:
raise Exception("record['rectype']=%s not implemented" %
record['rectype'])
f.seek(nextrec)
return record
def _read_typedesc(f):
'''Function to read in a type descriptor'''
typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)}
if typedesc['varflags'] & 2 == 2:
raise Exception("System variables not implemented")
typedesc['array'] = typedesc['varflags'] & 4 == 4
typedesc['structure'] = typedesc['varflags'] & 32 == 32
if typedesc['structure']:
typedesc['array_desc'] = _read_arraydesc(f)
typedesc['struct_desc'] = _read_structdesc(f)
elif typedesc['array']:
typedesc['array_desc'] = _read_arraydesc(f)
return typedesc
def _read_arraydesc(f):
'''Function to read in an array descriptor'''
arraydesc = {'arrstart': _read_long(f)}
if arraydesc['arrstart'] == 8:
_skip_bytes(f, 4)
arraydesc['nbytes'] = _read_long(f)
arraydesc['nelements'] = _read_long(f)
arraydesc['ndims'] = _read_long(f)
_skip_bytes(f, 8)
arraydesc['nmax'] = _read_long(f)
arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]
elif arraydesc['arrstart'] == 18:
warnings.warn("Using experimental 64-bit array read")
_skip_bytes(f, 8)
arraydesc['nbytes'] = _read_uint64(f)
arraydesc['nelements'] = _read_uint64(f)
arraydesc['ndims'] = _read_long(f)
_skip_bytes(f, 8)
arraydesc['nmax'] = 8
arraydesc['dims'] = []
for d in range(arraydesc['nmax']):
v = _read_long(f)
if v != 0:
raise Exception("Expected a zero in ARRAY_DESC")
arraydesc['dims'].append(_read_long(f))
else:
raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart'])
return arraydesc
def _read_structdesc(f):
'''Function to read in a structure descriptor'''
structdesc = {}
structstart = _read_long(f)
if structstart != 9:
raise Exception("STRUCTSTART should be 9")
structdesc['name'] = _read_string(f)
predef = _read_long(f)
structdesc['ntags'] = _read_long(f)
structdesc['nbytes'] = _read_long(f)
structdesc['predef'] = predef & 1
structdesc['inherits'] = predef & 2
structdesc['is_super'] = predef & 4
if not structdesc['predef']:
structdesc['tagtable'] = [_read_tagdesc(f)
for _ in range(structdesc['ntags'])]
for tag in structdesc['tagtable']:
tag['name'] = _read_string(f)
structdesc['arrtable'] = {tag['name']: _read_arraydesc(f)
for tag in structdesc['tagtable']
if tag['array']}
structdesc['structtable'] = {tag['name']: _read_structdesc(f)
for tag in structdesc['tagtable']
if tag['structure']}
if structdesc['inherits'] or structdesc['is_super']:
structdesc['classname'] = _read_string(f)
structdesc['nsupclasses'] = _read_long(f)
structdesc['supclassnames'] = [
_read_string(f) for _ in range(structdesc['nsupclasses'])]
structdesc['supclasstable'] = [
_read_structdesc(f) for _ in range(structdesc['nsupclasses'])]
STRUCT_DICT[structdesc['name']] = structdesc
else:
if not structdesc['name'] in STRUCT_DICT:
raise Exception("PREDEF=1 but can't find definition")
structdesc = STRUCT_DICT[structdesc['name']]
return structdesc
def _read_tagdesc(f):
'''Function to read in a tag descriptor'''
tagdesc = {'offset': _read_long(f)}
if tagdesc['offset'] == -1:
tagdesc['offset'] = _read_uint64(f)
tagdesc['typecode'] = _read_long(f)
tagflags = _read_long(f)
tagdesc['array'] = tagflags & 4 == 4
tagdesc['structure'] = tagflags & 32 == 32
tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT
# Assume '10'x is scalar
return tagdesc
def _replace_heap(variable, heap):
if isinstance(variable, Pointer):
while isinstance(variable, Pointer):
if variable.index == 0:
variable = None
else:
if variable.index in heap:
variable = heap[variable.index]
else:
warnings.warn("Variable referenced by pointer not found "
"in heap: variable will be set to None")
variable = None
replace, new = _replace_heap(variable, heap)
if replace:
variable = new
return True, variable
elif isinstance(variable, np.core.records.recarray):
# Loop over records
for ir, record in enumerate(variable):
replace, new = _replace_heap(record, heap)
if replace:
variable[ir] = new
return False, variable
elif isinstance(variable, np.core.records.record):
# Loop over values
for iv, value in enumerate(variable):
replace, new = _replace_heap(value, heap)
if replace:
variable[iv] = new
return False, variable
elif isinstance(variable, np.ndarray):
# Loop over values if type is np.object_
if variable.dtype.type is np.object_:
for iv in range(variable.size):
replace, new = _replace_heap(variable.item(iv), heap)
if replace:
variable.itemset(iv, new)
return False, variable
else:
return False, variable
class AttrDict(dict):
'''
A case-insensitive dictionary with access via item, attribute, and call
notations:
>>> d = AttrDict()
>>> d['Variable'] = 123
>>> d['Variable']
123
>>> d.Variable
123
>>> d.variable
123
>>> d('VARIABLE')
123
'''
def __init__(self, init={}):
dict.__init__(self, init)
def __getitem__(self, name):
return super(AttrDict, self).__getitem__(name.lower())
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key.lower(), value)
__getattr__ = __getitem__
__setattr__ = __setitem__
__call__ = __getitem__
def readsav(file_name, idict=None, python_dict=False,
uncompressed_file_name=None, verbose=False):
"""
Read an IDL .sav file.
Parameters
----------
file_name : str
Name of the IDL save file.
idict : dict, optional
Dictionary in which to insert .sav file variables.
python_dict : bool, optional
By default, the object return is not a Python dictionary, but a
case-insensitive dictionary with item, attribute, and call access
to variables. To get a standard Python dictionary, set this option
to True.
uncompressed_file_name : str, optional
This option only has an effect for .sav files written with the
/compress option. If a file name is specified, compressed .sav
files are uncompressed to this file. Otherwise, readsav will use
the `tempfile` module to determine a temporary filename
automatically, and will remove the temporary file upon successfully
reading it in.
verbose : bool, optional
Whether to print out information about the save file, including
the records read, and available variables.
Returns
-------
idl_dict : AttrDict or dict
If `python_dict` is set to False (default), this function returns a
case-insensitive dictionary with item, attribute, and call access
to variables. If `python_dict` is set to True, this function
returns a Python dictionary with all variable names in lowercase.
If `idict` was specified, then variables are written to the
dictionary specified, and the updated dictionary is returned.
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> import scipy.io as sio
>>> from scipy.io import readsav
Get the filename for an example .sav file from the tests/data directory.
>>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
>>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')
Load the .sav file contents.
>>> sav_data = readsav(sav_fname)
Get keys of the .sav file contents.
>>> print(sav_data.keys())
dict_keys(['array1d'])
Access a content with a key.
>>> print(sav_data['array1d'])
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0.]
"""
# Initialize record and variable holders
records = []
if python_dict or idict:
variables = {}
else:
variables = AttrDict()
# Open the IDL file
f = open(file_name, 'rb')
# Read the signature, which should be 'SR'
signature = _read_bytes(f, 2)
if signature != b'SR':
raise Exception(f"Invalid SIGNATURE: {signature}")
# Next, the record format, which is '\x00\x04' for normal .sav
# files, and '\x00\x06' for compressed .sav files.
recfmt = _read_bytes(f, 2)
if recfmt == b'\x00\x04':
pass
elif recfmt == b'\x00\x06':
if verbose:
print("IDL Save file is compressed")
if uncompressed_file_name:
fout = open(uncompressed_file_name, 'w+b')
else:
fout = tempfile.NamedTemporaryFile(suffix='.sav')
if verbose:
print(f" -> expanding to {fout.name}")
# Write header
fout.write(b'SR\x00\x04')
# Cycle through records
while True:
# Read record type
rectype = _read_long(f)
fout.write(struct.pack('>l', int(rectype)))
# Read position of next record and return as int
nextrec = _read_uint32(f)
nextrec += _read_uint32(f) * 2**32
# Read the unknown 4 bytes
unknown = f.read(4)
# Check if the end of the file has been reached
if RECTYPE_DICT[rectype] == 'END_MARKER':
fout.write(struct.pack('>I', int(nextrec) % 2**32))
fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
fout.write(unknown)
break
# Find current position
pos = f.tell()
# Decompress record
rec_string = zlib.decompress(f.read(nextrec-pos))
# Find new position of next record
nextrec = fout.tell() + len(rec_string) + 12
# Write out record
fout.write(struct.pack('>I', int(nextrec % 2**32)))
fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
fout.write(unknown)
fout.write(rec_string)
# Close the original compressed file
f.close()
# Set f to be the decompressed file, and skip the first four bytes
f = fout
f.seek(4)
else:
raise Exception(f"Invalid RECFMT: {recfmt}")
# Loop through records, and add them to the list
while True:
r = _read_record(f)
records.append(r)
if 'end' in r:
if r['end']:
break
# Close the file
f.close()
# Find heap data variables
heap = {}
for r in records:
if r['rectype'] == "HEAP_DATA":
heap[r['heap_index']] = r['data']
# Find all variables
for r in records:
if r['rectype'] == "VARIABLE":
replace, new = _replace_heap(r['data'], heap)
if replace:
r['data'] = new
variables[r['varname'].lower()] = r['data']
if verbose:
# Print out timestamp info about the file
for record in records:
if record['rectype'] == "TIMESTAMP":
print("-"*50)
print(f"Date: {record['date']}")
print(f"User: {record['user']}")
print(f"Host: {record['host']}")
break
# Print out version info about the file
for record in records:
if record['rectype'] == "VERSION":
print("-"*50)
print(f"Format: {record['format']}")
print(f"Architecture: {record['arch']}")
print(f"Operating System: {record['os']}")
print(f"IDL Version: {record['release']}")
break
# Print out identification info about the file
for record in records:
if record['rectype'] == "IDENTIFICATON":
print("-"*50)
print(f"Author: {record['author']}")
print(f"Title: {record['title']}")
print(f"ID Code: {record['idcode']}")
break
# Print out descriptions saved with the file
for record in records:
if record['rectype'] == "DESCRIPTION":
print("-"*50)
print(f"Description: {record['description']}")
break
print("-"*50)
print("Successfully read %i records of which:" %
(len(records)))
# Create convenience list of record types
rectypes = [r['rectype'] for r in records]
for rt in set(rectypes):
if rt != 'END_MARKER':
print(" - %i are of type %s" % (rectypes.count(rt), rt))
print("-"*50)
if 'VARIABLE' in rectypes:
print("Available variables:")
for var in variables:
print(f" - {var} [{type(variables[var])}]")
print("-"*50)
if idict:
for var in variables:
idict[var] = variables[var]
return idict
else:
return variables
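def _example_readsav_into_dict(file_name, target_dict):
    """Editor's sketch, not part of the original module: demonstrates the `idict`
    argument documented above, which writes the restored variables into an
    existing mapping and returns it."""
    return readsav(file_name, idict=target_dict)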
| 29.355876 | 89 | 0.562559 | [
"BSD-3-Clause"
] | ikamensh/scipy | scipy/io/idl.py | 26,479 | Python |
from typing import List, Optional, Any, Dict
from checkov.common.graph.checks_infra.enums import Operators
from .equals_attribute_solver import EqualsAttributeSolver
class NotEqualsAttributeSolver(EqualsAttributeSolver):
operator = Operators.NOT_EQUALS
def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:
super().__init__(resource_types=resource_types, attribute=attribute, value=value)
def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
return not super()._get_operation(vertex, attribute)
| 39.866667 | 96 | 0.770903 | [
"Apache-2.0"
] | 0xflotus/checkov | checkov/common/checks_infra/solvers/attribute_solvers/not_equals_attribute_solver.py | 598 | Python |
#!/usr/bin/env python
""" These tests only check whether plots are created,
not that they look correct!
"""
import unittest
import os
import sys
from glob import glob
import numpy as np
import matador.cli.dispersion
from matador.scrapers import res2dict, magres2dict
from matador.hull import QueryConvexHull
from matador.plotting.battery_plotting import plot_voltage_curve
from matador.plotting.pdf_plotting import plot_pdf
from matador.plotting.pxrd_plotting import plot_pxrd
from matador.plotting.magres_plotting import plot_magres
from .utils import MatadorUnitTest
REAL_PATH = "/".join(os.path.realpath(__file__).split("/")[:-1]) + "/"
ROOT_DIR = os.getcwd()
try:
import matplotlib # noqa
matplotlib.use("Agg")
MATPLOTLIB_PRESENT = True
except ImportError:
MATPLOTLIB_PRESENT = False
try:
import ternary # noqa
TERNARY_PRESENT = True
except ImportError:
TERNARY_PRESENT = False
try:
import _tkinter # noqa
except Exception:
MATPLOTLIB_PRESENT = False
@unittest.skipIf(not MATPLOTLIB_PRESENT, "Skipping plotting tests.")
class SpectralPlotTests(unittest.TestCase):
""" Test Dispersion script. """
def test_pdis_plot(self):
""" Test combined spectral plots. """
os.chdir(REAL_PATH + "/data/dispersion")
expected_file = "K3P-OQMD_4786-CollCode25550_spectral.png"
if os.path.isfile(expected_file):
os.remove(expected_file)
sys.argv = [
"dispersion",
"K3P-OQMD_4786-CollCode25550",
"--png",
"-scale",
"10",
"-interp",
"2",
"-pw",
"-5",
"5",
"--gap",
"--preserve_kspace_distance",
"--figsize",
"10",
"10",
]
errored = False
try:
matador.cli.dispersion.main()
except Exception as exc:
errored = True
error = exc
file_exists = os.path.isfile(expected_file)
if file_exists:
os.remove(expected_file)
os.chdir(ROOT_DIR)
if errored:
raise error
self.assertTrue(file_exists)
def test_dos_only(self):
""" Test combined spectral plots. """
os.chdir(REAL_PATH + "/data/dispersion")
expected_file = "K3P-OQMD_4786-CollCode25550_spectral.png"
if os.path.isfile(expected_file):
os.remove(expected_file)
sys.argv = [
"dispersion",
"K3P-OQMD_4786-CollCode25550",
"--png",
"--dos_only",
"--figsize",
"10",
"10",
]
errored = False
try:
matador.cli.dispersion.main()
except Exception as exc:
errored = True
error = exc
file_exists = os.path.isfile(expected_file)
if file_exists:
os.remove(expected_file)
os.chdir(ROOT_DIR)
if errored:
raise error
self.assertTrue(file_exists)
def test_multiseed(self):
""" Test plotting two seed bandstructures on top of each other. """
os.chdir(REAL_PATH + "/data/bands_files")
expected_file = "KPSn_spectral.png"
sys.argv = [
"dispersion",
"KPSn",
"KPSn_2",
"--dos_only",
"--cmap",
"viridis",
"--png",
"--band_reorder",
"--labels",
"PBE, LDA",
"--figsize",
"10",
"10",
"--colours",
"green",
"red"
]
errored = False
try:
matador.cli.dispersion.main()
except Exception as exc:
errored = True
error = exc
file_exists = os.path.isfile(expected_file)
if file_exists:
os.remove(expected_file)
os.chdir(ROOT_DIR)
if errored:
raise error
self.assertTrue(file_exists)
def test_x11_no_fail(self):
""" Test combined spectral plots. """
os.chdir(REAL_PATH + "/data/dispersion")
sys.argv = [
"dispersion",
"K3P-OQMD_4786-CollCode25550",
"--dos_only",
"--cmap",
"viridis",
"--figsize",
"10",
"10",
]
errored = False
try:
matador.cli.dispersion.main()
except Exception as exc:
errored = True
error = exc
os.chdir(ROOT_DIR)
if errored:
raise error
def test_phonon_dispersion(self):
""" Test phonon dispersion plot. """
os.chdir(REAL_PATH + "/data/phonon_dispersion")
expected_file = "K3P_spectral.png"
if os.path.isfile(expected_file):
os.remove(expected_file)
sys.argv = [
"dispersion",
"K3P",
"--png",
"-ph",
"--colours",
"grey",
"green",
"blue",
"--figsize",
"10",
"10",
]
errored = False
try:
matador.cli.dispersion.main()
except Exception as exc:
errored = True
error = exc
file_exists = os.path.isfile(expected_file)
if file_exists:
os.remove(expected_file)
os.chdir(ROOT_DIR)
if errored:
raise error
self.assertTrue(file_exists)
def test_phonon_ir(self):
""" Test phonon IR/Raman plot. """
os.chdir(REAL_PATH + "/data/phonon_ir")
expected_file = "h-BN_IRR_ir.svg"
if os.path.isfile(expected_file):
os.remove(expected_file)
sys.argv = ["dispersion", "h-BN_IRR", "--svg", "-ir", "--figsize", "5", "5"]
errored = False
try:
matador.cli.dispersion.main()
except Exception as exc:
errored = True
error = exc
file_exists = os.path.isfile(expected_file)
if file_exists:
os.remove(expected_file)
os.chdir(ROOT_DIR)
if errored:
raise error
self.assertTrue(file_exists)
def test_projector_scraping(self):
from matador.plotting.spectral_plotting import _parse_projectors_list
self.assertEqual(
_parse_projectors_list("K"),
[
("K", "s", None),
("K", "p", None),
("K", "d", None),
("K", "f", None),
("K", None, None),
],
)
self.assertEqual(
_parse_projectors_list("K,P"),
[
("K", "s", None),
("K", "p", None),
("K", "d", None),
("K", "f", None),
("K", None, None),
("P", "s", None),
("P", "p", None),
("P", "d", None),
("P", "f", None),
("P", None, None),
],
)
self.assertEqual(
_parse_projectors_list("K,P:s"),
[
("K", "s", None),
("K", "p", None),
("K", "d", None),
("K", "f", None),
("K", None, None),
("P", "s", None),
],
)
self.assertEqual(
_parse_projectors_list("123:x,P:s"), [("123", "x", None), ("P", "s", None)]
)
@unittest.skipIf(not MATPLOTLIB_PRESENT, "Skipping plotting tests.")
class HullPlotTests(MatadorUnitTest):
""" Tests for plotting phase diagrams. """
def test_binary_hull_plot(self):
""" Test plotting binary hull. """
expected_files = ["KP_hull_simple.svg"]
cursor = res2dict(REAL_PATH + "data/hull-KP-KSnP_pub/*.res")[0]
QueryConvexHull(
cursor=cursor,
elements=["K", "P"],
svg=True,
hull_cutoff=0.0,
plot_kwargs={"plot_fname": "KP_hull_simple", "svg": True},
)
for expected_file in expected_files:
self.assertTrue(os.path.isfile(expected_file))
def test_binary_battery_plots(self):
""" Test plotting binary hull. """
expected_files = ["KP_hull.png", "KP_voltage.png", "KP_volume.png"]
cursor = res2dict(REAL_PATH + "data/hull-KP-KSnP_pub/*.res")[0]
QueryConvexHull(
cursor=cursor,
elements=["K", "P"],
no_plot=False,
png=True,
quiet=False,
voltage=True,
labels=True,
label_cutoff=0.05,
hull_cutoff=0.1,
volume=True,
plot_kwargs={"colour_by_source": True},
)
for expected_file in expected_files:
self.assertTrue(os.path.isfile(expected_file))
def test_voltage_labels(self):
expected_files = ["KP_voltage.png"]
cursor = res2dict(REAL_PATH + "data/hull-KP-KSnP_pub/*.res")[0]
hull = QueryConvexHull(
cursor=cursor, species="KP", no_plot=True, voltage=True, labels=True
)
plot_voltage_curve(hull.voltage_data, labels=True, savefig=expected_files[0])
for expected_file in expected_files:
self.assertTrue(os.path.isfile(expected_file))
@unittest.skipIf(not TERNARY_PRESENT, "Skipping as python-ternary not found")
def test_ternary_hull_plot(self):
""" Test plotting ternary hull. """
expected_files = ["KSnP_hull.png", "KSnP_voltage.png"]
for expected_file in expected_files:
if os.path.isfile(expected_file):
os.remove(expected_file)
res_list = glob(REAL_PATH + "data/hull-KPSn-KP/*.res")
self.assertEqual(
len(res_list),
87,
"Could not find test res files, please check installation...",
)
cursor = [res2dict(res)[0] for res in res_list]
QueryConvexHull(
cursor=cursor,
elements=["K", "Sn", "P"],
no_plot=False,
png=True,
quiet=False,
voltage=True,
labels=True,
label_cutoff=0.05,
hull_cutoff=0.1,
capmap=True,
)
        for expected_file in expected_files:
            self.assertTrue(os.path.isfile(expected_file))
for expected_file in expected_files:
os.remove(expected_file)
def test_beef_hull_plot(self):
""" Test plotting BEEF hull. """
from matador.hull import EnsembleHull
from matador.scrapers import castep2dict
expected_file = "KP_beef_hull.svg"
cursor, s = castep2dict(REAL_PATH + "data/beef_files/*.castep", db=False)
self.assertEqual(len(s), 0)
beef_hull = EnsembleHull(
cursor,
"_beef",
elements=["K", "P"],
num_samples=10,
energy_key="total_energy_per_atom",
parameter_key="thetas",
)
beef_hull.plot_hull(svg=True)
self.assertTrue(os.path.isfile(expected_file))
def test_td_hull_plot(self):
from matador.hull.hull_temperature import TemperatureDependentHull
from matador.scrapers import castep2dict
cursor, s = castep2dict(REAL_PATH + "data/castep_phonon_files/*.castep", db=False)
td_hull = TemperatureDependentHull(cursor=cursor, energy_key="total_energy_per_atom")
td_hull.plot_hull(plot_fname="td_hull", png=True)
self.assertTrue(os.path.isfile("td_hull.png"))
@unittest.skipIf(not MATPLOTLIB_PRESENT, "Skipping plotting tests.")
class FingerprintPlotTests(MatadorUnitTest):
""" Test ability to plot PDF and PXRDs. """
def test_pdf_plot(self):
structure = res2dict(
REAL_PATH + "data/res_files/KPSn-OQMD_123456.res", as_model=True
)[0]
plot_pdf(structure, png=True)
self.assertTrue(os.path.isfile("K7PSn_pdf.png"))
plot_pdf([structure, structure], filename="test_pdf", rmax=5, png=True)
self.assertTrue(os.path.isfile("test_pdf.png"))
def test_pxrd_plot(self):
structure = res2dict(
REAL_PATH + "data/res_files/KPSn-OQMD_123456.res", as_model=True
)[0]
plot_pxrd(structure, png=True)
self.assertTrue(os.path.isfile("K7PSn_pxrd.png"))
plot_pdf([structure, structure], filename="test_pxrd", png=True)
self.assertTrue(os.path.isfile("test_pxrd.png"))
@unittest.skipIf(not MATPLOTLIB_PRESENT, "Skipping plotting tests.")
class MagresPlotTests(MatadorUnitTest):
""" Test ability to plot magres data. """
def test_magres_plot(self):
magres, f = magres2dict(REAL_PATH + "data/magres_files/*P*.magres", as_model=True)
plot_magres(
magres,
species="P",
savefig="magres_P.pdf",
line_kwargs={"c": "green"},
)
self.assertTrue(os.path.isfile("magres_P.pdf"))
plot_magres(
magres,
species="Li",
broadening_width=0,
magres_key="chemical_shift_aniso",
savefig="magres_Li.png",
signal_labels=["NaP", "LiP"],
line_kwargs=[{"lw": 3}, {"ls": "--"}],
)
self.assertTrue(os.path.isfile("magres_Li.png"))
with self.assertRaises(RuntimeError):
plot_magres(magres, species=None)
with self.assertRaises(RuntimeError):
plot_magres(magres, species="K")
@unittest.skipIf(not MATPLOTLIB_PRESENT, "Skipping plotting tests.")
class ConvergencePlotTest(unittest.TestCase):
""" Test the ability to read convergence data and make plots. """
def setUp(self):
os.chdir(REAL_PATH + "/data/convergence/")
def tearDown(self):
os.chdir(ROOT_DIR)
def test_scraping_and_plotting(self):
from matador.plotting.convergence_plotting import (
get_convergence_data,
get_convergence_files,
combine_convergence_data,
get_convergence_values,
)
from matador.plotting.convergence_plotting import plot_cutoff_kpt_grid
kpt_files = get_convergence_files("completed_kpts")
cutoff_files = get_convergence_files("completed_cutoff")
kpt_data = get_convergence_data(
kpt_files, conv_parameter="kpoints_mp_spacing", species=["Li"]
)
cutoff_data = get_convergence_data(
cutoff_files, conv_parameter="cut_off_energy", species=["Li"]
)
data = combine_convergence_data(kpt_data, cutoff_data)
self.assertEqual(
data["Li-bcc"]["kpoints_mp_spacing"]["kpoints_mp_spacing"], [0.1, 0.07]
)
self.assertEqual(data["Li-bcc"]["cut_off_energy"]["cut_off_energy"], [300, 400])
values, parameters = get_convergence_values(
data["Li-bcc"], "cut_off_energy", "formation_energy_per_atom", log=True
)
self.assertEqual(parameters.tolist(), [300.0, 400.0])
self.assertAlmostEqual(values.tolist()[0], 0.7291198427497395, places=6)
self.assertEqual(values.tolist()[1], -np.inf)
self.data = data
expected_files = ["conv.svg"]
for expected_file in expected_files:
if os.path.isfile(expected_file):
os.remove(expected_file)
plot_cutoff_kpt_grid(self.data, svg=True)
for file in expected_files:
self.assertTrue(os.path.isfile(file))
for expected_file in expected_files:
os.remove(expected_file)
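if __name__ == "__main__":
    # Editor's addition (assumption): allow running this module directly; the
    # suite is normally collected by pytest/unittest discovery instead.
    unittest.main()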
| 31.769388 | 93 | 0.558232 | [
"MIT"
] | AJMGroup/matador | tests/test_plotting.py | 15,567 | Python |
def is_dark(wf):
    # Fall back to treating the theme as dark when Alfred reports no background.
    if not wf.alfred_env.get('theme_background'):
        return True
    # theme_background looks like 'rgba(r,g,b,a)': strip the 'rgba(' prefix and the
    # trailing alpha component, keeping only the integer r, g, b values.
    rgb = [int(x) for x in wf.alfred_env['theme_background'][5:-6].split(',')]
    # Rec. 601 luma: treat the theme as dark when perceived brightness is below 50%.
    return (0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2]) / 255 < 0.5
def get_icon(wf, name):
name = '%s-dark' % name if is_dark(wf) else name
return "icons/%s.png" % name
def search_key_for_action(action):
return u'{}'.format(action['name'])
def search_key_for_project(project):
return u'{} {} {}'.format(project['id'], project['name'], project['description'])
def search_key_for_aliased_activities(activity):
return u'{} {} {}'.format(activity['name'], activity['project']['name'], activity['alias']) | 30.863636 | 92 | 0.656848 | [
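def example_icon_path(wf):
    # Editor's sketch (assumption): `wf` is an Alfred-Workflow `Workflow` instance
    # and 'zebra' is an illustrative icon name; the helper resolves to
    # icons/zebra.png or icons/zebra-dark.png depending on is_dark().
    return get_icon(wf, 'zebra')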
"MIT"
] | r0x73/alfred-zebra | src/helpers.py | 679 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://cloudbilling.googleapis.com/v1/'
DOCS_URL = 'https://cloud.google.com/billing/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
BILLINGACCOUNTS = (
'billingAccounts',
'{+name}',
{
'':
'billingAccounts/{billingAccountsId}',
},
[u'name'],
True
)
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
[u'projectsId'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
| 27.480769 | 74 | 0.671798 | [
"Apache-2.0"
] | bshaffer/google-cloud-sdk | lib/googlecloudsdk/third_party/apis/cloudbilling/v1/resources.py | 1,429 | Python |
# coding: utf-8
"""
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href=\"https://developer.laserfiche.com\">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetEdocWithAuditReasonRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'audit_reason_id': 'int',
'comment': 'str'
}
attribute_map = {
'audit_reason_id': 'auditReasonId',
'comment': 'comment'
}
def __init__(self, audit_reason_id=None, comment=None): # noqa: E501
"""GetEdocWithAuditReasonRequest - a model defined in Swagger""" # noqa: E501
self._audit_reason_id = None
self._comment = None
self.discriminator = None
if audit_reason_id is not None:
self.audit_reason_id = audit_reason_id
if comment is not None:
self.comment = comment
@property
def audit_reason_id(self):
"""Gets the audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501
The reason id for this audit event. # noqa: E501
:return: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501
:rtype: int
"""
return self._audit_reason_id
@audit_reason_id.setter
def audit_reason_id(self, audit_reason_id):
"""Sets the audit_reason_id of this GetEdocWithAuditReasonRequest.
The reason id for this audit event. # noqa: E501
:param audit_reason_id: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501
:type: int
"""
self._audit_reason_id = audit_reason_id
@property
def comment(self):
"""Gets the comment of this GetEdocWithAuditReasonRequest. # noqa: E501
The comment for this audit event. # noqa: E501
:return: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""Sets the comment of this GetEdocWithAuditReasonRequest.
The comment for this audit event. # noqa: E501
:param comment: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501
:type: str
"""
self._comment = comment
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetEdocWithAuditReasonRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetEdocWithAuditReasonRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.93617 | 314 | 0.603598 | [
"BSD-2-Clause"
] | Layer8Err/laserfiche-api | laserfiche_api/models/get_edoc_with_audit_reason_request.py | 4,503 | Python |
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from allauth.socialaccount.helpers import render_authentication_error
from allauth.socialaccount.providers.oauth.client import (OAuthClient,
OAuthError)
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount import providers
from allauth.socialaccount.models import SocialToken, SocialLogin
class OAuthAdapter(object):
def complete_login(self, request, app):
"""
Returns a SocialLogin instance
"""
raise NotImplementedError
def get_provider(self):
return providers.registry.by_id(self.provider_id)
class OAuthView(object):
@classmethod
def adapter_view(cls, adapter):
def view(request, *args, **kwargs):
self = cls()
self.request = request
self.adapter = adapter()
return self.dispatch(request, *args, **kwargs)
return view
def _get_client(self, request, callback_url):
provider = self.adapter.get_provider()
app = provider.get_app(request)
scope = ' '.join(provider.get_scope())
parameters = {}
if scope:
parameters['scope'] = scope
for param in request.GET:
if param.startswith('auth_param_'):
                parameters.setdefault('auth_params', {})[param[11:]] = request.GET.get(param)
if 'redirect_account_url' in request.GET:
request.session['redirect_account_url'] = request.GET['redirect_account_url']
client = OAuthClient(request, app.key, app.secret,
self.adapter.request_token_url,
self.adapter.access_token_url,
self.adapter.authorize_url,
request.GET.get('callback_url', callback_url),
parameters=parameters, disable_ssl_certificate_validation=True)
return client
class OAuthLoginView(OAuthView):
def dispatch(self, request):
callback_url = reverse(self.adapter.provider_id + "_callback")
# TODO: Can't this be moved as query param into callback?
# Tried but failed somehow, needs further study...
request.session['oauth_login_state'] \
= SocialLogin.marshall_state(request)
client = self._get_client(request, callback_url)
try:
return client.get_redirect()
except OAuthError:
return render_authentication_error(request)
class OAuthCallbackView(OAuthView):
def dispatch(self, request):
"""
View to handle final steps of OAuth based authentication where the user
gets redirected back to from the service provider
"""
login_done_url = reverse(self.adapter.provider_id + "_callback")
client = self._get_client(request, login_done_url)
if not client.is_valid():
if 'denied' in request.GET:
return HttpResponseRedirect(reverse('socialaccount_login_cancelled'))
extra_context = dict(oauth_client=client)
return render_authentication_error(request, extra_context)
app = self.adapter.get_provider().get_app(request)
try:
access_token = client.get_access_token()
token = SocialToken(app=app,
token=access_token['oauth_token'],
token_secret=access_token['oauth_token_secret'])
login = self.adapter.complete_login(request, app, token)
token.account = login.account
login.token = token
login.state = SocialLogin.unmarshall_state \
(request.session.pop('oauth_login_state', None))
login.redirect_account_url = request.session.pop('redirect_account_url', None)
return complete_social_login(request, login)
except OAuthError:
return render_authentication_error(request)
| 40.941176 | 96 | 0.630747 | [
"MIT"
] | rawjam/django-allauth | allauth/socialaccount/providers/oauth/views.py | 4,176 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetMachineLearningComputeResult',
'AwaitableGetMachineLearningComputeResult',
'get_machine_learning_compute',
]
@pulumi.output_type
class GetMachineLearningComputeResult:
"""
Machine Learning compute object wrapped into ARM resource envelope.
"""
def __init__(__self__, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Compute properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetMachineLearningComputeResult(GetMachineLearningComputeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMachineLearningComputeResult(
identity=self.identity,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_machine_learning_compute(compute_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMachineLearningComputeResult:
"""
Use this data source to access information about an existing resource.
:param str compute_name: Name of the Azure Machine Learning compute.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['computeName'] = compute_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20200901preview:getMachineLearningCompute', __args__, opts=opts, typ=GetMachineLearningComputeResult).value
return AwaitableGetMachineLearningComputeResult(
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
| 34.707483 | 183 | 0.640141 | [
"Apache-2.0"
] | test-wiz-sec/pulumi-azure-nextgen | sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200901preview/get_machine_learning_compute.py | 5,102 | Python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import os, json, hashlib
from torch.autograd import Function
from http import client as http_client
import antares_custom_op
def generate_antares_expression(antares_ir, inputs):
input_dict, kwargs = {}, {}
for i in range(len(inputs)):
input_dict['input%d' % i] = {
'dtype': str(inputs[i].dtype).split('.')[1],
'shape': list(inputs[i].shape)
}
kwargs['input%d' % i] = inputs[i]
input_dict = json.dumps(input_dict)
return '- einstein_v2("%s", input_dict=%s)' % (antares_ir.replace('"', '\\"'), input_dict)
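# For illustration only (hypothetical IR and shapes): two float32 inputs of shape [1024] would produce
#   - einstein_v2("output0[N] = input0[N] + input1[N]", input_dict={"input0": {"dtype": "float32", "shape": [1024]}, "input1": {"dtype": "float32", "shape": [1024]}})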
def fetch_and_compile_antares_kernel(expression, expr_hash, server_addr):
print('+ [Antares Op]', expression)
h = http_client.HTTPConnection(server_addr, timeout=10)
try:
h.request('GET', '/', headers={'COMPUTE_V1': expression})
except:
raise Exception("Failed to contact with Antares server: %s (not started?)" % server_addr)
res = h.getresponse()
if res.status != 200:
raise Exception("Fail to get server response, reason: %s" % res.reason)
source = res.read().decode()
try:
meta_bgn = source.index('///') + len('///')
except:
raise Exception("Illegal syntax for Antares expression: %s" % expression)
meta_pos = source.index(':', meta_bgn)
meta_end = source.index('\n', meta_pos)
meta_inputs = source[meta_bgn:meta_pos].split(',')
meta_outputs = source[meta_pos + 1:meta_end].split(',')
code_name = 'Antares' + expr_hash
source_path = '/tmp/antares_torch_%s.cc.kernel.cu' % code_name
# Compile Kernel object
with open(source_path, 'w') as fp:
fp.write(source)
return source, source_path, expr_hash, meta_inputs, meta_outputs
'''
class CustomFunction(Function):
@staticmethod
def forward(ctx, inputs, attributes):
outputs = antares_custom_op.forward(inputs, *attributes)
return outputs
'''
class CustomOp(torch.nn.Module):
def __init__(self, server_addr='localhost:8880'):
super(CustomOp, self).__init__()
self.server_addr = server_addr
self.ops = dict()
def forward(self, antares_ir, inputs):
antares_expr = generate_antares_expression(antares_ir, inputs)
expr_hash = hashlib.sha256(antares_expr.encode()).hexdigest()
if expr_hash in self.ops:
attributes = self.ops[expr_hash]
else:
attributes = fetch_and_compile_antares_kernel(antares_expr, expr_hash, self.server_addr)
self.ops[expr_hash] = attributes
outputs = antares_custom_op.forward(inputs, *attributes)
return outputs
| 32.164557 | 94 | 0.702086 | [
"MIT"
] | kdtree/antares | frameworks/antares/pytorch/custom_op.py | 2,541 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class ApplyRecordTokenRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'ApplyRecordToken','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_AppId(self):
return self.get_query_params().get('AppId')
def set_AppId(self,AppId):
self.add_query_param('AppId',AppId) | 35.659091 | 77 | 0.760357 | [
"Apache-2.0"
] | Explorer1092/aliyun-openapi-python-sdk | aliyun-python-sdk-live/aliyunsdklive/request/v20161101/ApplyRecordTokenRequest.py | 1,569 | Python |
"""
A simple class to help with paging result sets
"""
import logging
from flask import request, url_for, Markup
__author__ = 'Stephen Brown (Little Fish Solutions LTD)'
log = logging.getLogger(__name__)
class Pager(object):
"""
Standard Pager used on back end of website.
When viewing page 234 of 1000, the following page links will be displayed:
1, 134, 184, 232, 233, 234, 235, 236, 284, 334, 1000
"""
def __init__(self, page_size, page_number, query):
self.page_size = page_size
try:
self.page_number = int(page_number)
except ValueError:
self.page_number = 1
if self.page_number < 1:
self.page_number = 1
self.query = query
# Do the paging here
self.total_items = query.count()
self.total_pages = (self.total_items - 1) // page_size + 1
if self.page_number > self.total_pages:
self.page_number = self.total_pages
self.offset = self.page_size * (self.page_number - 1)
if self.offset < 0:
self.offset = 1
self.items = query[self.offset:self.offset + self.page_size]
def get_first_item_from_next_page(self):
if self.has_next:
return self.query[self.offset + self.page_size]
return None
def get_last_item_from_previous_page(self):
if self.has_prev:
return self.query[self.offset - 1]
return None
@property
def has_prev(self):
return self.page_number > 1
@property
def has_next(self):
return self.page_number < self.total_pages
@property
def prev(self):
return self.page_number - 1
@property
def next(self):
return self.page_number + 1
@property
def page_link_numbers(self):
pages = [1]
if self.total_pages <= 1:
return pages
if self.page_number > 103:
pages.append(self.page_number - 100)
if self.page_number > 53:
pages.append(self.page_number - 50)
if self.page_number > 3:
pages.append(self.page_number - 2)
if self.page_number > 2:
pages.append(self.page_number - 1)
if self.page_number != 1 and self.page_number != self.total_pages:
pages.append(self.page_number)
if self.page_number < self.total_pages - 1:
pages.append(self.page_number + 1)
if self.page_number < self.total_pages - 2:
pages.append(self.page_number + 2)
if self.page_number < self.total_pages - 52:
pages.append(self.page_number + 50)
if self.page_number < self.total_pages - 102:
pages.append(self.page_number + 100)
pages.append(self.total_pages)
return pages
@property
def empty(self):
return self.total_pages == 0
def get_full_page_url(self, page_number, scheme=None):
"""Get the full, external URL for this page, optinally with the passed in URL scheme"""
args = dict(
request.view_args,
_external=True,
)
if scheme is not None:
args['_scheme'] = scheme
if page_number != 1:
args['page'] = page_number
return url_for(request.endpoint, **args)
def get_canonical_url(self, scheme=None):
"""Get the canonical page URL"""
return self.get_full_page_url(self.page_number, scheme=scheme)
def render_prev_next_links(self, scheme=None):
"""Render the rel=prev and rel=next links to a Markup object for injection into a template"""
output = ''
if self.has_prev:
output += '<link rel="prev" href="{}" />\n'.format(self.get_full_page_url(self.prev, scheme=scheme))
if self.has_next:
output += '<link rel="next" href="{}" />\n'.format(self.get_full_page_url(self.next, scheme=scheme))
return Markup(output)
def render_canonical_link(self, scheme=None):
"""Render the rel=canonical link to a Markup object for injection into a template"""
return Markup('<link rel="canonical" href="{}" />'.format(self.get_canonical_url(scheme=scheme)))
def render_seo_links(self, scheme=None):
"""Render the rel=canonical, rel=prev and rel=next links to a Markup object for injection into a template"""
out = self.render_prev_next_links(scheme=scheme)
if self.total_pages == 1:
out += self.render_canonical_link(scheme=scheme)
return out
@property
def first_item_number(self):
"""
:return: The first "item number", used when displaying messages to the user
like "Displaying items 1 to 10 of 123" - in this example 1 would be returned
"""
return self.offset + 1
@property
def last_item_number(self):
"""
:return: The last "item number", used when displaying messages to the user
like "Displaying items 1 to 10 of 123" - in this example 10 would be returned
"""
n = self.first_item_number + self.page_size - 1
if n > self.total_items:
return self.total_items
return n
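# Illustrative sketch (not part of the library): Pager only needs an object that supports
# .count() and slicing, so a minimal stand-in query is enough to exercise it.
def _pager_usage_sketch():
    class FakeQuery(list):
        def count(self):
            return len(self)
    pager = Pager(page_size=10, page_number=3, query=FakeQuery(range(123)))
    # pager.items is the third page (indices 20..29); with 13 total pages,
    # pager.page_link_numbers is [1, 2, 3, 4, 5, 13].
    return pager.items, list(pager.page_link_numbers)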
class SimplePager(Pager):
"""
Uses the same api as above, but displays a range of continuous page numbers.
If you are on page 6 of 10 the following page numbers will be displayed:
1, 2, 3, 4, 5, 6, 7, 8, 9, 10
"""
def __init__(self, page_size, page_number, query, max_pages=12):
"""
:param max_pages: The maximum number of page links to display
"""
super().__init__(page_size, page_number, query)
self.max_pages = max_pages
@property
def page_link_numbers(self):
start = self.page_number - self.max_pages // 2 + 1
if start < 1:
start = 1
end = start + self.max_pages - 1
if end > self.total_pages:
end = self.total_pages
if start > 1:
start = end - self.max_pages + 1
return range(start, end + 1)
class InMemoryPager(Pager):
"""
Use this when you absolutely have to load everything and page in memory. You can access
all of the items through the all_items attribute after initialising this object
"""
def __init__(self, page_size, page_number, query):
self.page_size = page_size
try:
self.page_number = int(page_number)
except ValueError:
self.page_number = 1
if self.page_number < 1:
self.page_number = 1
self.query = query
# Load everything
self.all_items = query.all()
# Do the paging here
self.total_items = len(self.all_items)
self.total_pages = (self.total_items - 1) // page_size + 1
if self.page_number > self.total_pages:
self.page_number = self.total_pages
self.offset = self.page_size * (self.page_number - 1)
if self.offset < 0:
self.offset = 1
self.items = self.all_items[self.offset:self.offset + self.page_size]
class ViewAllPager(object):
"""
Uses the same API as pager, but lists all items on a single page. This is to allow
easy implementation of a "view all" function on a listing page
"""
def __init__(self, query):
self.page_number = 1
self.query = query
# Do the paging here
self.total_items = query.count()
self.page_size = self.total_items
self.total_pages = 1
self.offset = 0
self.items = query.all()
@property
def has_prev(self):
return False
@property
def has_next(self):
return False
@property
def prev(self):
return self.page_number - 1
@property
def next(self):
return self.page_number + 1
@property
def page_link_numbers(self):
return [1]
| 28.351916 | 116 | 0.593831 | [
"Apache-2.0"
] | michaelwalkerfl/littlefish | build/lib/littlefish/pager.py | 8,137 | Python |
"""Support for TPLink HS100/HS110/HS200 smart switch."""
import logging
import time
from pyHS100 import SmartDeviceException, SmartPlug
from homeassistant.components.switch import (
ATTR_CURRENT_POWER_W,
ATTR_TODAY_ENERGY_KWH,
SwitchDevice,
)
from homeassistant.const import ATTR_VOLTAGE
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
from . import CONF_SWITCH, DOMAIN as TPLINK_DOMAIN
from .common import async_add_entities_retry
PARALLEL_UPDATES = 0
_LOGGER = logging.getLogger(__name__)
ATTR_TOTAL_ENERGY_KWH = "total_energy_kwh"
ATTR_CURRENT_A = "current_a"
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform.
Deprecated.
"""
_LOGGER.warning(
"Loading as a platform is no longer supported, "
"convert to use the tplink component."
)
def add_entity(device: SmartPlug, async_add_entities):
"""Check if device is online and add the entity."""
# Attempt to get the sysinfo. If it fails, it will raise an
# exception that is caught by async_add_entities_retry which
# will try again later.
device.get_sysinfo()
async_add_entities([SmartPlugSwitch(device)], update_before_add=True)
async def async_setup_entry(hass: HomeAssistantType, config_entry, async_add_entities):
"""Set up switches."""
await async_add_entities_retry(
hass, async_add_entities, hass.data[TPLINK_DOMAIN][CONF_SWITCH], add_entity
)
return True
class SmartPlugSwitch(SwitchDevice):
"""Representation of a TPLink Smart Plug switch."""
def __init__(self, smartplug: SmartPlug):
"""Initialize the switch."""
self.smartplug = smartplug
self._sysinfo = None
self._state = None
self._available = False
# Set up emeter cache
self._emeter_params = {}
self._mac = None
self._alias = None
self._model = None
self._device_id = None
@property
def unique_id(self):
"""Return a unique ID."""
return self._device_id
@property
def name(self):
"""Return the name of the Smart Plug."""
return self._alias
@property
def device_info(self):
"""Return information about the device."""
return {
"name": self._alias,
"model": self._model,
"manufacturer": "TP-Link",
"connections": {(dr.CONNECTION_NETWORK_MAC, self._mac)},
"sw_version": self._sysinfo["sw_ver"],
}
@property
def available(self) -> bool:
"""Return if switch is available."""
return self._available
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.smartplug.turn_on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self.smartplug.turn_off()
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def update(self):
"""Update the TP-Link switch's state."""
try:
if not self._sysinfo:
self._sysinfo = self.smartplug.sys_info
self._mac = self.smartplug.mac
self._model = self.smartplug.model
if self.smartplug.context is None:
self._alias = self.smartplug.alias
self._device_id = self._mac
else:
self._alias = [
child
for child in self.smartplug.sys_info["children"]
if child["id"] == self.smartplug.context
][0]["alias"]
self._device_id = self.smartplug.context
if self.smartplug.context is None:
self._state = self.smartplug.state == self.smartplug.SWITCH_STATE_ON
else:
self._state = [
child
for child in self.smartplug.sys_info["children"]
if child["id"] == self.smartplug.context
][0]["state"] == 1
if self.smartplug.has_emeter:
emeter_readings = self.smartplug.get_emeter_realtime()
self._emeter_params[ATTR_CURRENT_POWER_W] = "{:.2f}".format(
emeter_readings["power"]
)
self._emeter_params[ATTR_TOTAL_ENERGY_KWH] = "{:.3f}".format(
emeter_readings["total"]
)
self._emeter_params[ATTR_VOLTAGE] = "{:.1f}".format(
emeter_readings["voltage"]
)
self._emeter_params[ATTR_CURRENT_A] = "{:.2f}".format(
emeter_readings["current"]
)
emeter_statics = self.smartplug.get_emeter_daily()
try:
self._emeter_params[ATTR_TODAY_ENERGY_KWH] = "{:.3f}".format(
emeter_statics[int(time.strftime("%e"))]
)
except KeyError:
# Device returned no daily history
pass
self._available = True
except (SmartDeviceException, OSError) as ex:
if self._available:
_LOGGER.warning(
"Could not read state for %s: %s", self.smartplug.host, ex
)
self._available = False
| 31.5 | 87 | 0.582665 | [
"Apache-2.0"
] | ABOTlegacy/home-assistant | homeassistant/components/tplink/switch.py | 5,607 | Python |
import cpboard
import periphery
import pytest
import smbus
import sys
def pytest_addoption(parser):
group = parser.getgroup('i2cslave')
group.addoption("--bus", dest='i2cbus', type=int, help='I2C bus number')
group.addoption("--serial-wait", default=20, dest='serial_wait', type=int, help='Number of milliseconds to wait before checking board output (default: 20ms)')
group.addoption("--smbus-timeout", default=True, dest='smbus_timeout', type=bool, help='Use SMBUS timeout limit (default: True)')
@pytest.fixture(scope='session')
def board(request):
board = cpboard.CPboard.from_try_all(request.config.option.boarddev)
board.open()
board.repl.reset()
return board
class I2C:
def __init__(self, bus):
self.bus = periphery.I2C('/dev/i2c-%d' % bus)
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.close()
def close(self):
self.bus.close()
def transfer(self, address, messages):
#__tracebackhide__ = True # Hide this from pytest traceback
self.bus.transfer(address, messages)
Message = periphery.I2C.Message
def read(self, address, n):
data = [0] * n
msgs = [I2C.Message(data, read=True)]
self.transfer(address, msgs)
return msgs[0].data
def write(self, address, data):
msgs = [I2C.Message(data)]
self.transfer(address, msgs)
def write_read(self, address, data, n):
recv = [0] * n
msgs = [I2C.Message(data), I2C.Message(recv, read=True)]
self.transfer(address, msgs)
return msgs[1].data
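    # Illustrative use from a test (bus number and device address are hypothetical):
    #   with I2C(1) as i2c:
    #       i2c.write(0x42, [0x00, 0x01])
    #       reply = i2c.write_read(0x42, [0x00], 2)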
@pytest.fixture
def i2cbus(request):
return I2C(request.config.option.i2cbus)
| 28 | 162 | 0.655152 | [
"MIT"
] | notro/cp-smbusslave | tests/i2cslave/conftest.py | 1,708 | Python |
from typing import Dict
# TODO consolidate some of these imports
from vyper.semantics.types.user.struct import StructDefinition
from vyper.semantics.types.value.address import AddressDefinition
from vyper.semantics.types.value.array_value import BytesArrayDefinition
from vyper.semantics.types.value.bytes_fixed import Bytes32Definition
from vyper.semantics.types.value.numeric import Uint256Definition
CONSTANT_ENVIRONMENT_VARS: Dict[str, Dict[str, type]] = {
"block": {
"coinbase": AddressDefinition,
"difficulty": Uint256Definition,
"number": Uint256Definition,
"gaslimit": Uint256Definition,
"basefee": Uint256Definition,
"prevhash": Bytes32Definition,
"timestamp": Uint256Definition,
},
"chain": {"id": Uint256Definition},
"msg": {
"data": BytesArrayDefinition,
"gas": Uint256Definition,
"sender": AddressDefinition,
"value": Uint256Definition,
},
"tx": {"origin": AddressDefinition},
}
MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {
"self": AddressDefinition,
}
def get_constant_vars() -> Dict:
"""
Get a dictionary of constant environment variables.
"""
result = {}
for name, members in CONSTANT_ENVIRONMENT_VARS.items():
members = {k: v(is_immutable=True) for k, v in members.items()}
result[name] = StructDefinition(name, members, is_immutable=True)
return result
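# For example, the returned mapping contains a "block" StructDefinition whose members
# ("coinbase", "timestamp", ...) are the immutable definitions declared above.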
def get_mutable_vars() -> Dict:
"""
Get a dictionary of mutable environment variables (those that are
modified during the course of contract execution, such as `self`).
"""
return {name: type_(is_immutable=True) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}
| 31.888889 | 95 | 0.699187 | [
"Apache-2.0"
] | GDGSNF/vyper | vyper/semantics/environment.py | 1,722 | Python |
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
import datetime # for checking renewal date range.
from django import forms
class RenewBookForm(forms.Form):
"""Form for a librarian to renew books."""
renewal_date = forms.DateField(
help_text="Enter a date between now and 4 weeks (default 3).")
def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
# Check date is not in past.
if data < datetime.date.today():
raise ValidationError(_('Invalid date - renewal in past'))
# Check date is in range librarian allowed to change (+4 weeks)
if data > datetime.date.today() + datetime.timedelta(weeks=4):
raise ValidationError(
_('Invalid date - renewal more than 4 weeks ahead'))
# Remember to always return the cleaned data.
return data
class ReturnBookForm(forms.Form):
"""Form for a librarian to renew books."""
return_date = forms.DateField(
help_text="Enter a date between borrow date and today.")
penalty = forms.IntegerField(
help_text="Penalty (in IDR).",
initial=0)
def clean_return_date(self):
data = self.cleaned_data['return_date']
# Check date is not in future.
if data > datetime.date.today():
raise ValidationError(_('Invalid date - return in future'))
return data | 34.348837 | 74 | 0.650643 | [
"CC0-1.0"
] | PMPL-Arieken/django-locallibrary-tutorial | catalog/forms.py | 1,477 | Python |
#!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
# Helper function to get surface normals
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = object_name.data
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
data_dict = {"object_list": dict_list}
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
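# Sketch of the YAML this produces (field values are illustrative):
#   object_list:
#   - test_scene_num: 2
#     arm_name: right
#     object_name: biscuits
#     pick_pose: {position: ..., orientation: ...}
#     place_pose: {position: ..., orientation: ...}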
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# TODO: Convert ROS msg to PCL data
cloud = ros_to_pcl(pcl_msg)
# TODO: Statistical Outlier Filtering
# creating a statistical outlier filter object for reducing noise
outlier_filter = cloud.make_statistical_outlier_filter()
# Set the number of neighboring points to analyze for any given point
outlier_filter.set_mean_k(10)
# Set threshold scale factor
x = 0.5
# Any point with a mean distance larger than global (mean distance+x*std_dev) will be considered outlier
outlier_filter.set_std_dev_mul_thresh(x)
# Finally call the filter function
cloud_filtered = outlier_filter.filter()
# TODO: Voxel Grid Downsampling
# Create a VoxelGrid filter object for our input point cloud
vox = cloud_filtered.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
LEAF_SIZE = 0.01
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
# TODO: PassThrough Filter
# Create a PassThrough filter object.
passthrough = cloud_filtered.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
#PassThrough Filter for removing outliers in y direction
# Create a PassThrough filter object.
passthrough = cloud_filtered.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = 'y'
passthrough.set_filter_field_name(filter_axis)
axis_min = -0.4
axis_max = 0.4
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
# TODO: RANSAC Plane Segmentation
# Create the segmentation object
seg = cloud_filtered.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 0.03
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
# TODO: Extract inliers and outliers
cloud_table = cloud_filtered.extract(inliers, negative=False)
cloud_objects = cloud_filtered.extract(inliers, negative=True)
# TODO: Euclidean Clustering
white_cloud = XYZRGB_to_XYZ(cloud_objects)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
ec.set_ClusterTolerance(0.05)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(25000)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
# TODO: Convert PCL data to ROS messages
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# TODO: Publish ROS messages
pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects = []
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster
pcl_cluster = cloud_objects.extract(pts_list)
# convert pcl to ros
ros_cluster = pcl_to_ros(pcl_cluster)
# Compute the associated feature vector
chists = compute_color_histograms(ros_cluster, using_hsv=True)
normals = get_normals(ros_cluster)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
# Make the prediction
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
# Publish the list of detected objects
detected_objects_pub.publish(detected_objects)
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
try:
pr2_mover(detected_objects)
except rospy.ROSInterruptException:
pass
# function to load parameters and request PickPlace service
def pr2_mover(object_list):
# TODO: Initialize variables
object_name = []
object_group = []
TEST_SCENE_NUM = Int32()
OBJECT_NAME = String()
WHICH_ARM = String()
PICK_POSE = Pose()
PLACE_POSE = Pose()
labels = []
centroids = [] # to be list of tuples (x, y, z)
# Store labels and their centroids in lists
for object in object_list:
labels.append(object.label)
points_arr = ros_to_pcl(object.cloud).to_array()
centroids.append(np.mean(points_arr, axis=0)[:3])
left_position = None
right_position = None
dict_list = []
# TODO: Get/Read parameters
object_list_param = rospy.get_param('/object_list')
drop_box_param = rospy.get_param('/dropbox')
# Get left and right box positions
for i in range(len(drop_box_param)):
if(drop_box_param[i]['name'] == 'left'):
left_position = drop_box_param[i]['position']
else:
right_position = drop_box_param[i]['position']
# TODO: Rotate PR2 in place to capture side tables for the collision map
# TODO: Loop through the pick list
for i in range(len(object_list_param)):
# Get object name from pick list
OBJECT_NAME.data = object_list_param[i]['name']
# Specify the test scene number
TEST_SCENE_NUM.data = 2
# Get index of object from stored list
obj_idx = labels.index(object_list_param[i]['name'])
# Stop if object was not detected in the scene
if(obj_idx == -1):
rospy.loginfo('Object not detected')
return
# TODO: Get the PointCloud for a given object and obtain it's centroid# Calculate centroids
centroid = centroids[obj_idx]
# Cast centroids to native Python float type and assign centroid to pick_pose
PICK_POSE.position.x = np.asscalar(centroid[0])
PICK_POSE.position.y = np.asscalar(centroid[1])
PICK_POSE.position.z = np.asscalar(centroid[2])
# TODO: Assign the arm to be used for pick_place
if(object_list_param[i]['group'] == 'red'):
WHICH_ARM.data = 'left'
PLACE_POSE.position.x = left_position[0]
PLACE_POSE.position.y = left_position[1]
PLACE_POSE.position.z = left_position[2]
else:
WHICH_ARM.data = 'right'
PLACE_POSE.position.x = right_position[0]
PLACE_POSE.position.y = right_position[1]
PLACE_POSE.position.z = right_position[2]
# TODO: Create a list of dictionaries (made with make_yaml_dict()) for later output to yaml format
yaml_dict = make_yaml_dict(TEST_SCENE_NUM, OBJECT_NAME, WHICH_ARM, PICK_POSE, PLACE_POSE)
dict_list.append(yaml_dict)
# Wait for 'pick_place_routine' service to come up
rospy.wait_for_service('pick_place_routine')
try:
pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
# TODO: Insert your message variables to be sent as a service request
resp = pick_place_routine(TEST_SCENE_NUM, OBJECT_NAME, WHICH_ARM, PICK_POSE, PLACE_POSE)
print ("Response: ",resp.success)
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
# TODO: Output your request parameters into output yaml file
# send_to_yaml('src/RoboND-Perception-Project/pr2_robot/config/output_3.yaml',dict_list)
# print('yaml file saved')
if __name__ == '__main__':
# TODO: ROS node initialization
rospy.init_node('pick_and_place_main')
# TODO: Create Subscribers
pcl_sub = rospy.Subscriber("/pr2/world/points", pc2.PointCloud2, pcl_callback, queue_size=1)
# TODO: Create Publishers
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# TODO: Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin() | 35.961988 | 108 | 0.70583 | [
"MIT"
] | navinrahim/RoboND-Perception-Project | pr2_robot/scripts/project_run.py | 12,299 | Python |
from flask import render_template, request, redirect, url_for, flash
from datetime import datetime as dt
from app.forms import AdicionarQuarto, VerificarDisponibilidade
from app.models import Rooms, Hotels, User, Reservation, Status
from app import db
def adicionar_quarto(user_id):
form_reserva = VerificarDisponibilidade()
user = User.query.filter_by(id=user_id).first()
form = AdicionarQuarto()
if user.profile not in ['admin', 'gerente']:
return '<h1>Erro! Você não pode acessar este conteúdo!</h1>'
if user.hotel_id is None:
hoteis = Hotels.query.order_by(Hotels.created_at)
form.hotel_id.choices = [(hotel.id, hotel.name) for hotel in hoteis if hotel.user_id == user_id]
else:
hoteis = Hotels.query.filter_by(id=user.hotel_id).order_by(Hotels.created_at)
form.hotel_id.choices = [(hotel.id, hotel.name) for hotel in hoteis]
if request.method == 'POST':
if form.validate_on_submit():
room = Rooms.query.filter_by(hotel_id=form.hotel_id.data, number=form.number.data).first()
if room is None:
room = Rooms(number=form.number.data,
hotel_id=form.hotel_id.data,
name=form.name.data,
short_description=form.short_description.data,
kind=form.kind.data,
phone_extension=form.phone_extension.data,
price=float(form.price.data.replace('.','').replace(',','.')),
guest_limit=form.guest_limit.data)
db.session.add(room)
db.session.commit()
flash('Quarto cadastrado com sucesso!', 'success')
else:
flash('Quarto já existe...', 'danger')
return redirect(url_for('ocupacao_quartos_endpoint', id=form.hotel_id.data))
return render_template('adicionar_quartos.html',
form=form,
hoteis=hoteis,
user=user,
titulo='Adicionar quarto',
form_reserva=form_reserva
)
def ocupacao_quartos(id, user_id):
form_reserva = VerificarDisponibilidade()
user = User.query.filter_by(id=user_id).first()
hotel = Hotels.query.get_or_404(id)
if hotel.user_id != user_id and user.hotel_id != hotel.id:
return '<h1>Erro! Você não pode acessar este conteúdo!</h1>'
quartos = Rooms.query.filter_by(hotel_id=id).order_by(Rooms.number)
reservas = Reservation.query.order_by(Reservation.id)
hoje = dt.strptime(dt.today().strftime('%Y-%m-%d'), '%Y-%m-%d')
status_reservas = [(r.room_id, (r.check_in <= hoje <= r.check_out)) for r in reservas if r.status == Status.ATIVO]
status_reservas = [status for status in status_reservas if status[1] is True]
status_reservas = dict(set(status_reservas))
return render_template('ocupacao_quartos.html',
quartos=quartos,
form_reserva=form_reserva,
status_reservas=status_reservas
)
def deletar_quarto(id_quarto, user_id):
user = User.query.filter_by(id=user_id).first()
quarto = Rooms.query.get_or_404(id_quarto)
id_hotel = quarto.hotel_id
hotel = Hotels.query.get_or_404(id_hotel)
if hotel.user_id != user_id and user.hotel_id != hotel.id or user.profile not in ['admin', 'gerente']:
return '<h1>Erro! Você não pode acessar este conteúdo!</h1>'
db.session.delete(quarto)
db.session.commit()
flash('Quarto deletado com sucesso!', 'success')
return redirect(f'/ocupacao-quartos/{id_hotel}')
def editar_quarto(quarto_id, user_id):
form_reserva = VerificarDisponibilidade()
form = AdicionarQuarto()
user = User.query.filter_by(id=user_id).first()
user_id_room = Rooms \
.query.filter_by(id=quarto_id) \
.join(Hotels, Rooms.hotel_id == Hotels.id).add_columns(Hotels.user_id).add_columns(Hotels.id)
if [i.user_id for i in user_id_room][0] != user_id and user.hotel_id != [i for i in user_id_room][0][2]:
return '<h1>Erro! Você não pode acessar este conteúdo!</h1>'
if user.hotel_id is None:
hoteis = Hotels.query.order_by(Hotels.created_at)
form.hotel_id.choices = [(hotel.id, hotel.name) for hotel in hoteis if hotel.user_id == user_id]
else:
hoteis = Hotels.query.filter_by(id=user.hotel_id).order_by(Hotels.created_at)
form.hotel_id.choices = [(hotel.id, hotel.name) for hotel in hoteis]
if form.validate_on_submit():
if request.method == 'POST':
to_update = Rooms.query.get_or_404(quarto_id)
to_update.hotel_id = request.form['hotel_id']
to_update.number = request.form['number']
to_update.name = request.form['name']
to_update.short_description = request.form['short_description']
to_update.kind = request.form['kind']
to_update.phone_extension = request.form['phone_extension']
to_update.price = float(request.form['price'].replace('.','').replace(',','.'))
to_update.guest_limit = request.form['guest_limit']
db.session.commit()
flash('Quarto editado com sucesso!', 'success')
return redirect(url_for('ocupacao_quartos_endpoint', id=request.form['hotel_id']))
room = Rooms.query.filter_by(id=quarto_id).first()
form.hotel_id.default = room.hotel_id
form.process()
form.number.data = room.number
form.name.data = room.name
form.short_description.data = room.short_description
form.kind.data = room.kind
form.phone_extension.data = room.phone_extension
form.price.data = str(room.price).replace('.',',')
form.guest_limit.data = room.guest_limit
return render_template('adicionar_quartos.html',
form=form,
user=user,
quarto=room,
titulo='Editar quarto',
form_reserva=form_reserva
)
| 45.416058 | 118 | 0.61829 | [
"MIT"
] | ES-UFABC/Grupo-17 | app/scripts/ocupacao_quartos.py | 6,235 | Python |
#!/usr/bin/env python3
#
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
import json
import shutil
import signal
import subprocess
import sys
import tempfile
# Get signal names from numbers in Python
# http://stackoverflow.com/a/2549950
SIGNAMES = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
if v.startswith('SIG') and not v.startswith('SIG_'))
class Error(Exception):
pass
class Executable(object):
def __init__(self, exe, *before_args, **kwargs):
self.exe = exe
self.before_args = list(before_args)
self.after_args = []
self.basename = kwargs.get('basename',
os.path.basename(exe)).replace('.exe', '')
self.error_cmdline = kwargs.get('error_cmdline', True)
self.stdout_handle = self._ForwardHandle(kwargs.get('forward_stdout'))
self.stderr_handle = self._ForwardHandle(kwargs.get('forward_stderr'))
self.verbose = False
def _ForwardHandle(self, forward):
return None if forward else subprocess.PIPE
def _RunWithArgsInternal(self, *args, **kwargs):
cmd = [self.exe] + self.before_args + list(args) + self.after_args
cmd_str = ' '.join(cmd)
if self.verbose:
print(cmd_str)
if self.error_cmdline:
err_cmd_str = cmd_str.replace('.exe', '')
else:
err_cmd_str = self.basename
stdout = ''
stderr = ''
error = None
try:
process = subprocess.run(cmd, check=False, text=True,
stdout=self.stdout_handle,
stderr=self.stderr_handle, **kwargs)
stdout = process.stdout
stderr = process.stderr
if process.returncode < 0:
# Terminated by signal
signame = SIGNAMES.get(-process.returncode, '<unknown>')
error = Error('Signal raised running "%s": %s\n%s' % (err_cmd_str,
signame, stderr))
elif process.returncode > 0:
error = Error('Error running "%s" (%d):\n%s' % (err_cmd_str, process.returncode, stderr))
except OSError as e:
error = Error('Error running "%s": %s' % (err_cmd_str, str(e)))
return stdout, stderr, error
def RunWithArgsForStdout(self, *args, **kwargs):
stdout, stderr, error = self._RunWithArgsInternal(*args, **kwargs)
if error:
raise error
return stdout
def RunWithArgs(self, *args, **kwargs):
stdout, stderr, error = self._RunWithArgsInternal(*args, **kwargs)
if stdout:
sys.stdout.write(stdout)
if error:
raise error
def AppendArg(self, arg):
self.after_args.append(arg)
def AppendOptionalArgs(self, option_dict):
for option, value in option_dict.items():
if value:
if value is True:
self.AppendArg(option)
else:
self.AppendArg('%s=%s' % (option, value))
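# Illustrative usage (tool path and file names are hypothetical):
#   wat2wasm = Executable('out/wat2wasm', error_cmdline=False)
#   wat2wasm.AppendOptionalArgs({'-v': True})
#   wat2wasm.RunWithArgs('spec.wat', '-o', 'spec.wasm')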
@contextlib.contextmanager
def TempDirectory(out_dir, prefix=None):
if out_dir:
out_dir_is_temp = False
if not os.path.exists(out_dir):
os.makedirs(out_dir)
else:
out_dir = tempfile.mkdtemp(prefix=prefix)
out_dir_is_temp = True
try:
yield out_dir
finally:
if out_dir_is_temp:
shutil.rmtree(out_dir)
def ChangeExt(path, new_ext):
return os.path.splitext(path)[0] + new_ext
def ChangeDir(path, new_dir):
return os.path.join(new_dir, os.path.basename(path))
def Hexdump(data):
DUMP_OCTETS_PER_LINE = 16
DUMP_OCTETS_PER_GROUP = 2
p = 0
end = len(data)
lines = []
while p < end:
line_start = p
line_end = p + DUMP_OCTETS_PER_LINE
line = '%07x: ' % p
while p < line_end:
for i in range(DUMP_OCTETS_PER_GROUP):
if p < end:
line += '%02x' % data[p]
else:
line += ' '
p += 1
line += ' '
line += ' '
p = line_start
for i in range(DUMP_OCTETS_PER_LINE):
if p >= end:
break
x = data[p]
if x >= 32 and x < 0x7f:
line += '%c' % x
else:
line += '.'
p += 1
line += '\n'
lines.append(line)
return lines
def GetModuleFilenamesFromSpecJSON(json_filename):
with open(json_filename) as json_file:
json_data = json.load(json_file)
return [m['filename'] for m in json_data['commands'] if 'filename' in m]
| 30.54023 | 105 | 0.579225 | [
"Apache-2.0"
] | ChristianGutman/wabt | test/utils.py | 5,314 | Python |
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
import time
#Criteo's CTR Prediction Challenge
#Creating a list of the numerical and categorical variables
intnames = []
catnames = []
for i in range(13):
intnames += ['i'+ str(i+1)]
for i in range(26):
catnames += ['c'+ str(i+1)]
colnames = ['clicked'] + intnames + catnames
#Load Data (500,000 rows) and name columns
ds = pd.read_csv("train.txt", nrows=500000, sep='\t', header=None, names = colnames)
#Basic info of dataset
ds.info()
#Overall click-through rate in the sample
print(ds['clicked'].mean())
#Number of categories per each category variable
categoriesPerVariable = {}
for var in catnames:
varList = ds[var].tolist()
varUnique = set(varList)
print(var, len(varUnique))
categoriesPerVariable[var] = len(varUnique)
catnamesFinal = []
#Delete variables with more than 100 categories
for var in categoriesPerVariable:
if categoriesPerVariable[var] > 100:
ds = ds.drop(var, 1)
print(var, 'DELETED')
else: catnamesFinal += [var]
ds.info()
#Create dummy variables:
for var in catnamesFinal:
ds = pd.concat([ds, pd.get_dummies(ds[var], prefix = var, prefix_sep = '_')], axis=1)
ds = ds.drop(var, axis=1)
print('Created dummy variables for: ', var)
print(ds.shape)
#Creating train and test datasets
y = ds.clicked
x_cols = set(ds.columns)
x_cols.remove('clicked')
X = ds[list(x_cols)]
#Train, test and Validation Sets (60%, 20%, 20%)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, train_size=0.5)
#More Preprocessing
# - Fill NaN values in X_train, X_test, X_val with the mean of X_train
X_train[intnames] = X_train[intnames].fillna(X_train[intnames].mean())
X_test[intnames] = X_test[intnames].fillna(X_train[intnames].mean())
X_val[intnames] = X_val[intnames].fillna(X_train[intnames].mean())
#Dataset with PCA
#Choosing the number of components
from sklearn.decomposition import PCA
for e in range(10):
pca1 = PCA(n_components=e)
pca1.fit(X_train)
exp_var = 0
for i in pca1.explained_variance_ratio_:
exp_var += i
print(e, round(exp_var,3))
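#Cheaper way to inspect the same cumulative explained variance with a single fit (illustrative):
#print(np.cumsum(PCA(n_components=10).fit(X_train).explained_variance_ratio_))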
pca = PCA(n_components=5)
pca.fit(X_train)
X_train_PCA = pd.DataFrame(pca.transform(X_train))
X_test_PCA = pd.DataFrame(pca.transform(X_test))
X_val_PCA = pd.DataFrame(pca.transform(X_val))
'''
###########################################
# FUNCTIONS
###########################################
'''
def ROCcurve(y_pred, y_test):
# Compute ROC curve and ROC area for each class
n_classes = y_pred.shape[1]
fpr = dict()
tpr = dict()
roc_auc = dict()
y_test1 = []
for index, row in y_test.iteritems():
if row == 0: y_test1 += [[1,0]]
else: y_test1 += [[0,1]]
y_test1 = np.array(y_test1)
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test1[:,i], y_pred[:,i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test1.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
return fpr, tpr, roc_auc
#Precision Recall Curve
def precision_recall(y_test, y_pred):
ydp = []
for i in range(len(y_pred)): ydp+= [y_pred[i][1]]
precision, recall, _ = precision_recall_curve(y_test, ydp)
return precision, recall
def ypd(y_pred):
ydp = []
for i in range(len(y_pred)): ydp+= [y_pred[i][1]]
return ydp
#Return all the Algorithm Curves Info
def algorithmCurvesInfo(alg_name, y_pred, y_test):
algDict = {}
algDict['alg_name'] = alg_name
algDict['fpr'], algDict['tpr'], \
algDict['roc_auc'] = ROCcurve(y_pred, y_test)
    algDict['precision'], algDict['recall'] = precision_recall(y_test, y_pred)
algDict['average_precision'] = average_precision_score(y_test, ypd(y_pred))
return algDict
#PLOT ROC CURVE
def plotROC(alg_fpr_tpr_rocDict, color_paletteList, tuple_size, path_name):
colors = []
for p in color_paletteList:
for c in plt.get_cmap(p).colors:
colors += [c]
#Dict with key --> name of algorithm (dict)
#Each algorithm dict:
# - fpr
# - tpr
# - roc_auc
# - alg_name: algorithm name to be shown
plt.figure(figsize=tuple_size)
col = 0
for al in alg_fpr_tpr_rocDict:
fpr = alg_fpr_tpr_rocDict[al]['fpr']
tpr = alg_fpr_tpr_rocDict[al]['tpr']
roc_auc = alg_fpr_tpr_rocDict[al]['roc_auc']
alg_name = alg_fpr_tpr_rocDict[al]['alg_name']
plt.plot(fpr[1], tpr[1], color= colors[col], alpha = 0.7,
label= alg_name + ' (area = %0.3f)' % roc_auc[1])
col += 1
plt.plot([0, 1], [0, 1], color='black', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curves per each algorithm')
plt.legend(loc="lower right")
plt.savefig(path_name + '_ROC-AUC.png')
plt.show()
#Plot the Precision-Recall Curve
def plotPrecisionRecall(alg_pre_recDict, color_paletteList, tuple_size, path_name):
colors = []
for p in color_paletteList:
for c in plt.get_cmap(p).colors:
colors += [c]
col = 0
plt.figure(figsize=tuple_size)
for al in alg_pre_recDict:
recall = alg_pre_recDict[al]['recall']
precision = alg_pre_recDict[al]['precision']
average_precision = alg_pre_recDict[al]['average_precision']
alg_name = alg_pre_recDict[al]['alg_name']
'''
plt.step(recall, precision, color=colors[col], alpha=0.8, where='post', \
label= alg_name + ' (area = %0.3f)'.format(average_precision))
'''
plt.plot(recall, precision, color=colors[col], alpha=0.8, \
label= alg_name + ' (area = %0.3f)' % average_precision)
col += 1
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.0])
plt.xlim([0.0, 1.0])
plt.legend(loc="upper right")
plt.title('Precision-Recall curve for CLICKED')
plt.savefig(path_name + '_PrecisionRecall.png')
plt.show()
#Algorithm Process Automation
def algorithmAutomat(algorithm, X_train, y_train, X_test, name):
algDict = {}
train_s = time.time()
algorithm.fit(X_train, y_train)
train_e = time.time()
pred_s = time.time()
y_pred = algorithm.predict_proba(X_test)
pred_e = time.time()
algDict = algorithmCurvesInfo(name, y_pred, y_test)
algDict['train_time'] = round(train_e - train_s,2)
algDict['predict_time'] = round(pred_e - pred_s,2)
algDict['model'] = algorithm
print(name + ' Prediction calculated')
print('Elapsed time: ' + str(round(pred_e-train_s,2)) + ' seconds')
return algDict
#Algorithm Validation Prediction and Curves
def algorithmValidation(model, X_validation, y_validation, name):
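    # Score an already-fitted model on the validation split and collect its ROC / precision-recall curve info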
algDict = {}
start = time.time()
y_pred = model.predict_proba(X_validation)
end = time.time()
algDict = algorithmCurvesInfo(name, y_pred, y_validation)
algDict['prediction_time'] = end - start
print(name + ' Prediction calculated')
return algDict
'''
###########################################
# ALGORITHMS
###########################################
'''
#Path where I will save the ROC and Precision-Recall curves
path = os.getcwd() + '/graphs/'
#Dictionaries to save algorithms' results for dataset with and without PCA
algs = {}
algsPCA = {}
######################################
# Logistic Regression
######################################
from sklearn.linear_model import LogisticRegression
#Parameter tuning options
regularizers = ['l1', 'l2']
C = [0.001,0.01,0.1,1,10,100,1000]
algs['lr'] = {}
for r in regularizers:
for c in C:
#Algorithm name based on tuning options
name = 'LogReg_' + str(r) + '_' + str(c)
logreg = LogisticRegression(penalty = r, C = c ,random_state = 0)
algs['lr'][name] = algorithmAutomat(logreg, X_train, y_train, X_test, name)
algsPCA['lr'] = {}
for r in regularizers:
for c in C:
name = 'LogRegPCA_' + str(r) + '_' + str(c)
logreg = LogisticRegression(penalty = r, C = c ,random_state = 0)
algsPCA['lr'][name] = algorithmAutomat(logreg, X_train_PCA, y_train, X_test_PCA, name)
#Plots
plotROC(algs['lr'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LR')
plotROC(algsPCA['lr'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LRpca')
plotPrecisionRecall(algs['lr'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LR')
plotPrecisionRecall(algsPCA['lr'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LRpca')
######################################
# Random Forest
######################################
from sklearn.ensemble import RandomForestClassifier
n_estim = [2, 10, 50, 100, 1000]
max_d = [None, 2, 5, 10, 50]
algsPCA['rf'] = {}
for n in n_estim:
for m in max_d:
name = 'RandForPCA_est' + str(n) + '_depth' + str(m)
rf = RandomForestClassifier(n_estimators = n, max_depth=m, random_state=0)
        algsPCA['rf'][name] = algorithmAutomat(rf, X_train_PCA, y_train, X_test_PCA, name)
algs['rf'] = {}
for n in n_estim:
for m in max_d:
name = 'RandFor_est' + str(n) + '_depth' + str(m)
rf = RandomForestClassifier(n_estimators = n, max_depth=m, random_state=0)
algs['rf'][name] = algorithmAutomat(rf, X_train, y_train, X_test, name)
plotROC(algs['rf'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'RF')
plotROC(algsPCA['rf'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'RFpca')
plotPrecisionRecall(algs['rf'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'RF')
plotPrecisionRecall(algsPCA['rf'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'RFpca')
######################################
# K-nearest neighbors
######################################
from sklearn.neighbors import KNeighborsClassifier
algsPCA['knn'] = {}
for k in [5, 10, 20, 50, 100, 200]:
name = 'KNN_PCA_' + str(k)
knn = KNeighborsClassifier(n_neighbors=k)
algsPCA['knn'][name] = algorithmAutomat(knn, X_train_PCA, y_train, X_test_PCA, name)
algs['knn'] = {}
for k in [5, 10, 20, 50, 100, 200]:
name = 'KNN_' + str(k)
knn = KNeighborsClassifier(n_neighbors=k)
algs['knn'][name] = algorithmAutomat(knn, X_train, y_train, X_test, name)
plotROC(algs['knn'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'KNN')
plotROC(algsPCA['knn'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'KNNpca')
plotPrecisionRecall(algs['knn'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'KNN')
plotPrecisionRecall(algsPCA['knn'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'KNNpca')
######################################
# Naive Bayes
######################################
from sklearn.naive_bayes import GaussianNB
algsPCA['nbayes'] = {}
algs['nbayes'] = {}
gnb = GaussianNB()
name = 'NaiveBayes_PCA'
algsPCA['nbayes'][name] = algorithmAutomat(gnb, X_train_PCA, y_train, X_test_PCA, name)
algs['nbayes'] = {}
gnb = GaussianNB()
name = 'NaiveBayes'
algs['nbayes'][name] = algorithmAutomat(gnb, X_train, y_train, X_test, name)
plotROC(algs['nbayes'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'NB')
plotPrecisionRecall(algs['nbayes'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'NB')
plotROC(algsPCA['nbayes'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'NBpca')
plotPrecisionRecall(algsPCA['nbayes'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'NBpca')
######################################
# AdaBoost
######################################
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
n_estim = [2, 10, 50]
max_d = [2, 10, 50, None]
algsPCA['adab'] = {}
for n in n_estim:
for m in max_d:
name = 'AdaBoost_PCA_est' + str(n) + '_depth' + str(m)
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=m),
algorithm="SAMME", n_estimators=n)
algsPCA['adab'][name] = algorithmAutomat(bdt, X_train_PCA, y_train, X_test_PCA, name)
algs['adab'] = {}
for n in n_estim:
for m in max_d:
name = 'AdaBoost_est' + str(n) + '_depth' + str(m)
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=m),
algorithm="SAMME", n_estimators=n)
algs['adab'][name] = algorithmAutomat(bdt, X_train, y_train, X_test, name)
plotROC(algs['adab'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'AB')
plotROC(algsPCA['adab'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'ABpca')
plotPrecisionRecall(algs['adab'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'AB')
plotPrecisionRecall(algsPCA['adab'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'ABpca')
######################################
# Linear Discriminant Analysis
######################################
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
algsPCA['lda'] = {}
lda = LDA()
name = 'LDA_PCA'
algsPCA['lda'] [name] = algorithmAutomat(lda, X_train_PCA, y_train, X_test_PCA, name)
algs['lda'] = {}
lda = LDA()
name = 'LDA'
algs['lda'][name] = algorithmAutomat(lda, X_train, y_train, X_test, name)
plotROC(algs['lda'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LDA')
plotPrecisionRecall(algs['lda'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LDA')
plotROC(algsPCA['lda'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LDApca')
plotPrecisionRecall(algsPCA['lda'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'LDApca')
######################################
# Gradient Boosting
######################################
from sklearn.ensemble import GradientBoostingClassifier
learning_rate = [0.1, 1]
max_depth = [3,5]
loss = ['deviance', 'exponential']
algsPCA['gradbo'] = {}
for l in learning_rate:
for m in max_depth:
for lo in loss:
name = 'GradBoost_PCA_lr' + str(l) + '_depth' + str(m) + '_loss-' + lo
gbc = GradientBoostingClassifier(learning_rate = l, max_depth = m, loss = lo)
algsPCA['gradbo'][name] = algorithmAutomat(gbc, X_train_PCA, y_train, X_test_PCA, name)
algs['gradbo'] = {}
for l in learning_rate:
for m in max_depth:
for lo in loss:
name = 'GradBoost_lr' + str(l) + '_depth' + str(m) + '_loss-' + lo
gbc = GradientBoostingClassifier(learning_rate = l, max_depth = m, loss = lo)
algs['gradbo'][name] = algorithmAutomat(gbc, X_train, y_train, X_test, name)
plotROC(algs['gradbo'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'GB')
plotROC(algsPCA['gradbo'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'GBpca')
plotPrecisionRecall(algs['gradbo'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'GB')
plotPrecisionRecall(algsPCA['gradbo'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'GBpca')
######################################
# NEURAL NETWORKS: MLP
######################################
#File mlp_train.py included in this repository
#This file contains the needed functions
import mlp_train as mlp
from keras.utils import to_categorical
#Creating dummy variables for the response variable (needed in Neural Nets)
y_train_cat = to_categorical(y_train)
y_test_cat = to_categorical(y_test)
algs['mlp'] = {}
#Load model trained in MLP.py
#Parameters:
baseName = 'MLP_'
batch_size = 200
epochs = 20
optimizer = ['adam', 'rmsprop']
denseLayers = [2, 3]
layerNeurons = [200, 500]
dropout = [True, False]
dropoutRate = 0.3
n_classes = 2
for o in optimizer:
for d in denseLayers:
for do in dropout:
for ln in layerNeurons:
name, y_pred_mlp, train_time = mlp.NeuralNetProcess(baseName, o, batch_size, epochs, d, ln,
do, dropoutRate, X_train, X_test, y_train_cat, y_test_cat, n_classes)
algs['mlp'][name] = algorithmCurvesInfo(name, y_pred_mlp, y_test)
algs['mlp'][name]['train_time'] = train_time
algs['mlp'][name]['predict_time'] = None
plotROC(algs['mlp'], ['Set1', 'Set2', 'Set3', 'Set1'], (10,8), path + 'MLP')
plotPrecisionRecall(algs['mlp'], ['Set1', 'Set2', 'Set3', 'Set1'], (10,8), path + 'MLP')
'''
###########################################
# TEST EXPORTING SUMMARIES
###########################################
'''
#Exporting summary info to csv file
headers = ['HAS_PCA', 'ALGORITHM', 'ALG_NAME', 'TRAIN_TIME', 'PREDICT_TIME', 'AVERAGE_PRECISION', 'ROC_AUC']
rows = []
for a in algsPCA:
print('---------------',a)
for k in algsPCA[a]:
row = []
row += ['PCA']
row += [a]
row += [k]
row += [str(algsPCA[a][k]['train_time'])]
row += [str(algsPCA[a][k]['predict_time'])]
row += [str(algsPCA[a][k]['average_precision'])]
row += [str(algsPCA[a][k]['roc_auc'][1])]
rows += [row]
for a in algs:
print('---------------',a)
for k in algs[a]:
row = []
row += ['REG']
row += [a]
row += [k]
row += [str(algs[a][k]['train_time'])]
row += [str(algs[a][k]['predict_time'])]
row += [str(algs[a][k]['average_precision'])]
row += [str(algs[a][k]['roc_auc'][1])]
rows += [row]
csvfile = ', '.join(headers)
for r in rows:
csvfile += '\n' + ', '.join(r)
f = open(os.getcwd() + "\\algorithmsDataset.csv",'w')
f.write(csvfile)
f.close()
'''
###########################################
# VALIDATION MODELS
###########################################
'''
#Select best tuned model for each algorithm and store the list
#Enforce limits on training time (limitTrainTime) and prediction time (limitPredictTime).
bestAlgs = {}
limitTrainTime = 400
limitPredictTime = 200
bestAlgs['PCA'] = {}
for a in algsPCA:
balg = ''
roc = 0
for k in algsPCA[a]:
if algsPCA[a][k]['roc_auc'][1] > roc and algsPCA[a][k]['train_time'] < limitTrainTime and algsPCA[a][k]['predict_time'] < limitPredictTime:
roc = algsPCA[a][k]['roc_auc'][1]
balg = k
bestAlgs['PCA'][balg] = roc
bestAlgs['REG'] = {}
for a in algs:
balg = ''
roc = 0
for k in algs[a]:
        if algs[a][k]['roc_auc'][1] > roc and algs[a][k]['train_time'] < limitTrainTime and algs[a][k]['predict_time'] < limitPredictTime:
roc = algs[a][k]['roc_auc'][1]
balg = k
bestAlgs['REG'][balg] = roc
'''
###########################################
# VALIDATION PREDICTIONS
###########################################
'''
#Predict results using the validation set for each selected model
VALalgs = {}
VALalgs['PCA'] = {}
for k in bestAlgs['PCA']: print(k)
name = 'LogRegPCA_l1_100'
VALalgs['PCA'][name] = algorithmValidation(algsPCA['lr'][name]['model'], X_val_PCA, y_val, name)
name = 'RandForPCA_est100_depth10'
VALalgs['PCA'][name] = algorithmValidation(algsPCA['rf'][name]['model'], X_val_PCA, y_val, name)
name = 'KNN_PCA_100'
VALalgs['PCA'][name] = algorithmValidation(algsPCA['knn'][name]['model'], X_val_PCA, y_val, name)
name = 'NaiveBayes_PCA'
VALalgs['PCA'][name] = algorithmValidation(algsPCA['nbayes'][name]['model'], X_val_PCA, y_val, name)
name = 'AdaBoost_PCA_est50_depth10'
VALalgs['PCA'][name] = algorithmValidation(algsPCA['adab'][name]['model'], X_val_PCA, y_val,name)
name = 'GradBoost_PCA_lr0.1_depth5_loss-deviance'
VALalgs['PCA'][name] = algorithmValidation(algsPCA['gradbo'][name]['model'], X_val_PCA, y_val, name)
name = 'LDA_PCA'
VALalgs['PCA'][name] = algorithmValidation(algsPCA['lda'][name]['model'], X_val_PCA, y_val, name)
VALalgs['REG'] = {}
for k in bestAlgs['REG']: print(k)
name = 'LogReg_l1_0.1'
VALalgs['REG'][name] = algorithmValidation(algs['lr'][name]['model'], X_val, y_val, name)
name = 'RandFor_est100_depth50'
VALalgs['REG'][name] = algorithmValidation(algs['rf'][name]['model'], X_val, y_val, name)
name = 'KNN_100'
VALalgs['REG'][name] = algorithmValidation(algs['knn'][name]['model'], X_val, y_val, name)
name = 'NaiveBayes'
VALalgs['REG'][name] = algorithmValidation(algs['nbayes'][name]['model'], X_val, y_val, name)
name = 'AdaBoost_est50_depth10'
VALalgs['REG'][name] = algorithmValidation(algs['adab'][name]['model'], X_val, y_val, name)
name = 'GradBoost_lr0.1_depth5_loss-deviance'
VALalgs['REG'][name] = algorithmValidation(algs['gradbo'][name]['model'], X_val, y_val, name)
name = 'LDA'
VALalgs['REG'][name] = algorithmValidation(algs['lda'][name]['model'], X_val, y_val, name)
name = 'MLP_rmsprop_b200_e20_DL2_200_drop-False_0.3'
bestModelPath = os.getcwd() + '/NNbestModel/'
bestModelPathLoss = bestModelPath + 'model_loss_' + name + '.hdf5'
y_pred_mlp, prediction_time = mlp.NeuralNetPredict(bestModelPathLoss, X_val)
VALalgs['REG'][name] = algorithmCurvesInfo(name, y_pred_mlp, y_val)
VALalgs['REG'][name]['prediction_time'] = prediction_time
#Plot & Save
plotROC(VALalgs['PCA'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'Val_PCA')
plotROC(VALalgs['REG'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'Val_REG')
plotPrecisionRecall(VALalgs['PCA'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'Val_PCA')
plotPrecisionRecall(VALalgs['REG'], ['Set1', 'Set2', 'Set3'], (10,8), path + 'Val_REG')
'''
###########################################
# VALIDATION EXPORTING SUMMARIES
###########################################
'''
#Exporting validation set data to csv
headers = ['HAS_PCA', 'ALGORITHM', 'ALG_NAME', 'TRAIN_TIME', 'PREDICT_TIME', 'AVERAGE_PRECISION', 'ROC_AUC']
val_rows = []
for a in algsPCA:
for k in algsPCA[a]:
if k in VALalgs['PCA']:
print('---------------',a)
row = []
row += ['PCA']
row += [a]
row += [k]
row += [str(algsPCA[a][k]['train_time'])]
row += [str(VALalgs['PCA'][k]['prediction_time'])]
row += [str(VALalgs['PCA'][k]['average_precision'])]
row += [str(VALalgs['PCA'][k]['roc_auc'][1])]
val_rows += [row]
for a in algs:
for k in algs[a]:
if k in VALalgs['REG']:
print('---------------',a)
row = []
row += ['REG']
row += [a]
row += [k]
row += [str(algs[a][k]['train_time'])]
row += [str(VALalgs['REG'][k]['prediction_time'])]
row += [str(VALalgs['REG'][k]['average_precision'])]
row += [str(VALalgs['REG'][k]['roc_auc'][1])]
val_rows += [row]
csvfile = ', '.join(headers)
for r in val_rows:
csvfile += '\n' + ', '.join(r)
f = open(os.getcwd() + "\\algorithmsValidationDataset.csv",'w')
f.write(csvfile)
f.close()
| 34.593558 | 147 | 0.598626 | [
"MIT"
] | alvarodemig/Algorithms-Performance-Comparison | AlgPerformComparison.py | 22,555 | Python |
from dataclasses import dataclass, field
from typing import List
@dataclass
class Doc:
class Meta:
name = "doc"
elem: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"pattern": r"\]",
}
)
| 17.277778 | 40 | 0.511254 | [
"MIT"
] | tefra/xsdata-w3c-tests | output/models/ms_data/regex/re_i45_xsd/re_i45.py | 311 | Python |
"""
Train LearnedPDReconstructor on 'lodopab'.
"""
import numpy as np
from dival import get_standard_dataset
from dival.measure import PSNR
from dival.reconstructors.learnedpd_reconstructor import LearnedPDReconstructor
from dival.reference_reconstructors import (
check_for_params, download_params, get_hyper_params_path)
from dival.util.plot import plot_images
IMPL = 'astra_cuda'
LOG_DIR = './logs/lodopab_learnedpd'
SAVE_BEST_LEARNED_PARAMS_PATH = './params/lodopab_learnedpd'
dataset = get_standard_dataset('lodopab', impl=IMPL)
ray_trafo = dataset.get_ray_trafo(impl=IMPL)
test_data = dataset.get_data_pairs('test', 100)
reconstructor = LearnedPDReconstructor(
ray_trafo, log_dir=LOG_DIR,
save_best_learned_params_path=SAVE_BEST_LEARNED_PARAMS_PATH)
#%% obtain reference hyper parameters
if not check_for_params('learnedpd', 'lodopab', include_learned=False):
download_params('learnedpd', 'lodopab', include_learned=False)
hyper_params_path = get_hyper_params_path('learnedpd', 'lodopab')
reconstructor.load_hyper_params(hyper_params_path)
#%% train
reconstructor.train(dataset)
#%% evaluate
recos = []
psnrs = []
for obs, gt in test_data:
reco = reconstructor.reconstruct(obs)
recos.append(reco)
psnrs.append(PSNR(reco, gt))
print('mean psnr: {:f}'.format(np.mean(psnrs)))
for i in range(3):
_, ax = plot_images([recos[i], test_data.ground_truth[i]],
fig_size=(10, 4))
ax[0].set_xlabel('PSNR: {:.2f}'.format(psnrs[i]))
ax[0].set_title('LearnedPDReconstructor')
ax[1].set_title('ground truth')
ax[0].figure.suptitle('test sample {:d}'.format(i))
| 32.019608 | 79 | 0.752603 | [
"MIT"
] | MBaltz/dival | dival/examples/ct_train_learnedpd.py | 1,633 | Python |
import numpy as np
import pytest
from agents.common import BoardPiece, NO_PLAYER, PLAYER1, PLAYER2, pretty_print_board, initialize_game_state, \
string_to_board, apply_player_action, connected_four, check_connect_topleft_bottomright
def test_initialize_game_state():
ret = initialize_game_state()
assert isinstance(ret, np.ndarray)
assert ret.dtype == BoardPiece
assert ret.shape == (6, 7)
assert np.all(ret == NO_PLAYER)
def test_output_pretty_print_board():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
ret = pretty_print_board(initialBoard)
assert ret != ''
def test_empty_pretty_print_board():
    initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
ret = pretty_print_board(initialBoard)
assert ret == '\n|==============|\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'|==============|\n' \
'|0 1 2 3 4 5 6 |'
def test_player1_pretty_print_board():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(PLAYER1)
ret = pretty_print_board(initialBoard)
assert ret == '\n|==============|\n' \
'|X X X X X X X |\n' \
'|X X X X X X X |\n' \
'|X X X X X X X |\n' \
'|X X X X X X X |\n' \
'|X X X X X X X |\n' \
'|X X X X X X X |\n' \
'|==============|\n' \
'|0 1 2 3 4 5 6 |'
def test_player2_pretty_print_board():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(PLAYER2)
ret = pretty_print_board(initialBoard)
assert ret == '\n|==============|\n' \
'|O O O O O O O |\n' \
'|O O O O O O O |\n' \
'|O O O O O O O |\n' \
'|O O O O O O O |\n' \
'|O O O O O O O |\n' \
'|O O O O O O O |\n' \
'|==============|\n' \
'|0 1 2 3 4 5 6 |'
def test_precision_pretty_print_board():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[0,0] = PLAYER1
ret = pretty_print_board(initialBoard)
assert ret == '\n|==============|\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'|X |\n' \
'|==============|\n' \
'|0 1 2 3 4 5 6 |'
def test_dimensions_pretty_print_board():
initialBoard = np.ndarray(shape=(7, 6), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
with pytest.raises(ValueError):
ret = pretty_print_board(initialBoard)
def test_invalid_piece_pretty_print_board():
    initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[0, 0] = 60
with pytest.raises(ValueError):
ret = pretty_print_board(initialBoard)
def test_string_to_board():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
    board_str = '\n|==============|\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'| |\n' \
'|==============|\n' \
'|0 1 2 3 4 5 6 |'
    ret = string_to_board(board_str)
assert ret.all() == initialBoard.all()
def test_drop_piece():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
ret = apply_player_action(initialBoard, 0, PLAYER1)
drop_board = np.ndarray(shape=(6, 7), dtype=BoardPiece)
drop_board.fill(NO_PLAYER)
drop_board[0,5] = 1
print(ret)
assert ret.all() == drop_board.all()
def test_connected_four_false():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
assert connected_four(initialBoard, PLAYER1, 5) == False
def test_connected_four_true():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(PLAYER1)
assert connected_four(initialBoard, PLAYER1, 5) == True
def test_connected_four_row_true():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[5, 1] = 1
initialBoard[5, 2] = 1
initialBoard[5, 3] = 1
print(initialBoard)
assert connected_four(initialBoard, PLAYER1, 0) == True
def test_connected_four_row_false():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[5, 1] = 1
initialBoard[5, 3] = 1
print(initialBoard)
with pytest.raises(AssertionError):
assert connected_four(initialBoard, PLAYER1, 0) == True
def test_connected_four_BL_TR_true():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[4, 1] = 1
initialBoard[3, 2] = 1
initialBoard[2, 3] = 1
print(initialBoard)
assert connected_four(initialBoard, PLAYER1, 0) == True
def test_connected_four_BL_TR_false():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[4, 1] = 1
initialBoard[3, 2] = 1
print(initialBoard)
with pytest.raises(AssertionError):
assert connected_four(initialBoard, PLAYER1, 0) == True
def test_connected_four_BR_TL_true():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 5] = 1
initialBoard[4, 4] = 1
initialBoard[3, 3] = 1
initialBoard[2, 2] = 1
print(initialBoard)
assert connected_four(initialBoard, PLAYER1, 5) == True
def test_connected_four_BR_TL_false():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 5] = 1
initialBoard[4, 4] = 1
initialBoard[2, 2] = 1
assert connected_four(initialBoard, PLAYER1, 5) == False
def test_diagonal_check_BLTR_true():
from agents.common import diagonal_check
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[4, 1] = 1
initialBoard[3, 2] = 1
initialBoard[2, 3] = 1
print(initialBoard)
assert diagonal_check(initialBoard, PLAYER1, 0, 5, 1, -1) == True
def test_diagonal_check_TLBR_YX_true():
from agents.common import diagonal_check
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 4] = 1
initialBoard[4, 3] = 1
initialBoard[3, 2] = 1
initialBoard[2, 1] = 1
print(initialBoard)
assert diagonal_check(initialBoard, PLAYER1, 4, 5, -1, -1) == True
def test_TLBR_YX_true():
from agents.common import check_connect_topleft_bottomright
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 4] = 1
initialBoard[4, 3] = 1
initialBoard[3, 2] = 1
initialBoard[2, 1] = 1
print(initialBoard)
assert check_connect_topleft_bottomright(initialBoard, PLAYER1, 4, 0) == True
def test_diagonal_check_TLBR_XY_true():
from agents.common import diagonal_check
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 6] = 1
initialBoard[4, 5] = 1
initialBoard[3, 4] = 1
initialBoard[2, 3] = 1
print(initialBoard)
assert diagonal_check(initialBoard, PLAYER1, 6, 5, -1, -1) == True
def test_TLBR_XY_true():
from agents.common import check_connect_topleft_bottomright
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 6] = 1
initialBoard[4, 5] = 1
initialBoard[3, 4] = 1
initialBoard[2, 3] = 1
print(initialBoard)
assert check_connect_topleft_bottomright(initialBoard, PLAYER1, 6, 0)
def test_BL_TR_true():
from agents.common import check_connect_topright_bottomleft
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[4, 1] = 1
initialBoard[3, 2] = 1
initialBoard[2, 3] = 1
print(initialBoard)
assert check_connect_topright_bottomleft(initialBoard, PLAYER1, 0, 0) == True
def test_BL_TR_false():
from agents.common import check_connect_topright_bottomleft
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[4, 1] = 1
initialBoard[3, 2] = 1
print(initialBoard)
assert check_connect_topright_bottomleft(initialBoard, PLAYER1, 0, 0) == False
def test_end_state_win():
from agents.common import check_end_state, GameState
from agents.common import check_connect_topright_bottomleft
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = 1
initialBoard[4, 1] = 1
initialBoard[3, 2] = 1
initialBoard[2, 3] = 1
assert check_end_state(initialBoard, PLAYER1, 0) == GameState.IS_WIN
def test_end_state_still_playing():
from agents.common import check_end_state, GameState
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
assert check_end_state(initialBoard, PLAYER1, 0) == GameState.STILL_PLAYING
def test_end_state_draw():
from agents.common import check_end_state, GameState
x = np.zeros((6, 7), dtype=int)
x.fill(2)
x[1::2, ::2] = 1
x[::2, 1::2] = 1
print(x)
assert check_end_state(x, PLAYER1, 1) == GameState.IS_DRAW
def test_diagonal_neg():
#str = "|==============|\n|O |\n|X O |\n|O X O |\n|X X O O X |\n|O X O X X |\n|X O X X O |\n|==============|\n|0 1 2 3 4 5 6 |"
#board = string_to_board(str)
board = np.zeros((6, 7), dtype=int)
board[0, 0] = PLAYER2
board[1, 1] = PLAYER2
board[2, 2] = PLAYER2
board[3, 3] = PLAYER2
assert check_connect_topleft_bottomright(board, PLAYER2, 2, 3) == True
| 27.616967 | 173 | 0.59313 | [
"MIT"
] | Sibimobon/Connect4 | tests/test_common.py | 10,743 | Python |
def types():
from .config_result import GraphenePipelineConfigValidationResult
from .config import (
GrapheneEvaluationErrorReason,
GrapheneEvaluationStack,
GrapheneEvaluationStackEntry,
GrapheneEvaluationStackListItemEntry,
GrapheneEvaluationStackPathEntry,
GrapheneFieldNotDefinedConfigError,
GrapheneFieldsNotDefinedConfigError,
GrapheneMissingFieldConfigError,
GrapheneMissingFieldsConfigError,
GraphenePipelineConfigValidationError,
GraphenePipelineConfigValidationInvalid,
GrapheneRunConfigValidationInvalid,
GraphenePipelineConfigValidationValid,
GrapheneRuntimeMismatchConfigError,
GrapheneSelectorTypeConfigError,
)
from .logger import GrapheneLogger
from .mode import GrapheneMode
from .pipeline_errors import GrapheneInvalidSubsetError, GrapheneConfigTypeNotFoundError
from .pipeline_ref import GraphenePipelineReference, GrapheneUnknownPipeline
from .pipeline_run_stats import (
GraphenePipelineRunStatsSnapshot,
GrapheneRunStatsSnapshotOrError,
GrapheneRunStatsSnapshot,
)
from .pipeline import (
GrapheneAsset,
GrapheneIPipelineSnapshot,
GraphenePipeline,
GraphenePipelinePreset,
GraphenePipelineRun,
GrapheneRunOrError,
GrapheneRun,
)
from .resource import GrapheneResource
from .snapshot import GraphenePipelineSnapshot, GraphenePipelineSnapshotOrError
from .status import GrapheneRunStatus
from .subscription import (
GraphenePipelineRunLogsSubscriptionFailure,
GraphenePipelineRunLogsSubscriptionPayload,
GraphenePipelineRunLogsSubscriptionSuccess,
)
return [
GrapheneAsset,
GrapheneConfigTypeNotFoundError,
GrapheneEvaluationErrorReason,
GrapheneEvaluationStack,
GrapheneEvaluationStackEntry,
GrapheneEvaluationStackListItemEntry,
GrapheneEvaluationStackPathEntry,
GrapheneFieldNotDefinedConfigError,
GrapheneFieldsNotDefinedConfigError,
GrapheneInvalidSubsetError,
GrapheneIPipelineSnapshot,
GrapheneLogger,
GrapheneMissingFieldConfigError,
GrapheneMissingFieldsConfigError,
GrapheneMode,
GraphenePipeline,
GraphenePipelineConfigValidationError,
GraphenePipelineConfigValidationInvalid,
GrapheneRunConfigValidationInvalid,
GraphenePipelineConfigValidationResult,
GraphenePipelineConfigValidationValid,
GraphenePipelinePreset,
GraphenePipelineReference,
GraphenePipelineRun,
GraphenePipelineRunLogsSubscriptionFailure,
GraphenePipelineRunLogsSubscriptionPayload,
GraphenePipelineRunLogsSubscriptionSuccess,
GrapheneRunOrError,
GraphenePipelineRunStatsSnapshot,
GrapheneRunStatsSnapshotOrError,
GrapheneRunStatsSnapshot,
GrapheneRunStatus,
GraphenePipelineSnapshot,
GraphenePipelineSnapshotOrError,
GrapheneResource,
GrapheneRuntimeMismatchConfigError,
GrapheneRun,
GrapheneSelectorTypeConfigError,
GrapheneUnknownPipeline,
]
| 36.965909 | 92 | 0.744543 | [
"Apache-2.0"
] | kbd/dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/__init__.py | 3,253 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class MultiapiServiceClientConfiguration(Configuration):
"""Configuration for MultiapiServiceClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:keyword api_version: Api Version. The default value is "3.0.0". Note that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
**kwargs: Any
) -> None:
super(MultiapiServiceClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "3.0.0") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'multiapi-sample/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| 49.169231 | 144 | 0.702753 | [
"MIT"
] | changlong-liu/autorest.python | docs/samples/specification/multiapi/generated/azure/multiapi/sample/v3/aio/_configuration.py | 3,196 | Python |
def factorial(n):
if n == 0:
return 1.0
else:
return float(n) * factorial(n-1)
def taylor_exp(n):
return [1.0/factorial(i) for i in range(n)]
def taylor_sin(n):
res = []
for i in range(n):
if i % 2 == 1:
            res.append((-1)**((i-1)//2)/float(factorial(i)))
else:
res.append(0.0)
return res
def benchmark():
taylor_exp(500)
taylor_sin(500)
if __name__ == '__main__':
benchmark()
| 20.44 | 60 | 0.483366 | [
"MIT"
] | UW-HPC/Parallelizing-Python-Workshop | lab4/taylor.py | 511 | Python |
"""
Support for TopoJSON was added in OGR 1.11 to the `GeoJSON` driver.
Starting at GDAL 2.3 support was moved to the `TopoJSON` driver.
"""
import fiona
from fiona.env import GDALVersion
import os
import pytest
from collections import OrderedDict
gdal_version = GDALVersion.runtime()
driver = "TopoJSON" if gdal_version.at_least((2, 3)) else "GeoJSON"
has_driver = driver in fiona.drvsupport.supported_drivers.keys()
@pytest.mark.skipif(not gdal_version.at_least((1, 11)), reason="Requires GDAL >= 1.11")
@pytest.mark.skipif(not has_driver, reason="Requires {} driver".format(driver))
def test_read_topojson(data_dir):
"""Test reading a TopoJSON file
The TopoJSON support in GDAL is a little unpredictable. In some versions
the geometries or properties aren't parsed correctly. Here we just check
that we can open the file, get the right number of features out, and
that they have a geometry and some properties. See GH#722.
"""
with fiona.open(os.path.join(data_dir, "example.topojson"), "r") as collection:
features = list(collection)
assert len(features) == 3, "unexpected number of features"
for feature in features:
assert isinstance(feature["properties"], OrderedDict)
assert len(feature["properties"]) > 0
assert feature["geometry"]["type"] in {"Point", "LineString", "Polygon"}
| 37.833333 | 87 | 0.725404 | [
"BSD-3-Clause"
] | HirniMeshram1/Fiona | tests/test_topojson.py | 1,362 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Youssef Restom and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.desk.doctype.notification_log.notification_log import (
enqueue_create_notification,
)
class ExtraNotificationLog(Document):
def after_insert(self):
self.make_notification_log()
def make_notification_log(self):
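        # Notify the referenced document's owner and last editor via a Notification Log entry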
alert_doc = frappe.get_doc(self.doctype_name, self.doc_name)
users = []
owner_email = frappe.get_value("User", alert_doc.owner, "email")
if owner_email:
users.append(owner_email)
modified_by_email = frappe.get_value("User", alert_doc.modified_by, "email")
if modified_by_email:
users.append(modified_by_email)
if len(users) == 0:
return
notification_doc = {
"type": "Share",
"document_type": self.doctype_name,
"subject": self.subject,
"document_name": self.doc_name,
"from_user": frappe.session.user,
}
enqueue_create_notification(users, notification_doc)
| 32.594595 | 84 | 0.665837 | [
"MIT"
] | Govind-Jangid/erpnext_telegram | erpnext_telegram_integration/extra_notifications/doctype/extra_notification_log/extra_notification_log.py | 1,206 | Python |
import os
import sys
import logging
from typing import Optional, List
from datetime import datetime
from pythonjsonlogger import jsonlogger
from . import dirs
from .decorators import deprecated
# NOTE: Will be removed in a future version since it's not compatible with running a multi-service process
# TODO: prefix with `_`
log_file_path = None
@deprecated
def get_log_file_path() -> Optional[str]: # pragma: no cover
"""DEPRECATED: Use get_latest_log_file instead."""
return log_file_path
def setup_logging(
name: str,
testing=False,
verbose=False,
log_stderr=True,
log_file=False,
log_file_json=False,
): # pragma: no cover
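    """Set up root logging: optional stderr and log-file handlers, plus an excepthook that logs unhandled exceptions."""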
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG if verbose else logging.INFO)
root_logger.handlers = []
if log_stderr:
root_logger.addHandler(_create_stderr_handler())
if log_file:
root_logger.addHandler(
_create_file_handler(name, testing=testing, log_json=log_file_json)
)
def excepthook(type_, value, traceback):
root_logger.exception("Unhandled exception", exc_info=(type_, value, traceback))
# call the default excepthook if log_stderr isn't true (otherwise it'll just get duplicated)
if not log_stderr:
sys.__excepthook__(type_, value, traceback)
sys.excepthook = excepthook
def _get_latest_log_files(name, testing=False) -> List[str]: # pragma: no cover
"""Returns a list with the paths of all available logfiles for `name` sorted by latest first."""
log_dir = dirs.get_log_dir(name)
files = filter(lambda filename: name in filename, os.listdir(log_dir))
files = filter(
lambda filename: "testing" in filename
if testing
else "testing" not in filename,
files,
)
return [os.path.join(log_dir, filename) for filename in sorted(files, reverse=True)]
def get_latest_log_file(name, testing=False) -> Optional[str]: # pragma: no cover
"""
Returns the filename of the last logfile with `name`.
Useful when you want to read the logfile of another TimeBench service.
"""
last_logs = _get_latest_log_files(name, testing=testing)
return last_logs[0] if last_logs else None
def _create_stderr_handler() -> logging.Handler: # pragma: no cover
stderr_handler = logging.StreamHandler(stream=sys.stderr)
stderr_handler.setFormatter(_create_human_formatter())
return stderr_handler
def _create_file_handler(
name, testing=False, log_json=False
) -> logging.Handler: # pragma: no cover
log_dir = dirs.get_log_dir(name)
# Set logfile path and name
global log_file_path
# Should result in something like:
# $LOG_DIR/aw-server_testing_2017-01-05T00:21:39.log
file_ext = ".log.json" if log_json else ".log"
now_str = str(datetime.now().replace(microsecond=0).isoformat()).replace(":", "-")
log_name = name + "_" + ("testing_" if testing else "") + now_str + file_ext
log_file_path = os.path.join(log_dir, log_name)
fh = logging.FileHandler(log_file_path, mode="w")
if log_json:
fh.setFormatter(_create_json_formatter())
else:
fh.setFormatter(_create_human_formatter())
return fh
def _create_human_formatter() -> logging.Formatter: # pragma: no cover
return logging.Formatter(
"%(asctime)s [%(levelname)-5s]: %(message)s (%(name)s:%(lineno)s)",
"%Y-%m-%d %H:%M:%S",
)
def _create_json_formatter() -> logging.Formatter: # pragma: no cover
supported_keys = [
"asctime",
# 'created',
"filename",
"funcName",
"levelname",
# 'levelno',
"lineno",
"module",
# 'msecs',
"message",
"name",
"pathname",
# 'process',
# 'processName',
# 'relativeCreated',
# 'thread',
# 'threadName'
]
def log_format(x):
"""Used to give JsonFormatter proper parameter format"""
return ["%({0:s})".format(i) for i in x]
custom_format = " ".join(log_format(supported_keys))
return jsonlogger.JsonFormatter(custom_format)
| 29.877698 | 106 | 0.666747 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | minhlt9196/activeseconds-aw-core | aw_core/log.py | 4,153 | Python |
from txgcv.base.algorithm import Algorithm
from txgcv.base.parameter import Parameter
__all__ = ["Algorithm", "Parameter"] | 24.8 | 42 | 0.798387 | [
"BSD-3-Clause"
] | dongyaoli10x/txgcv | txgcv/base/__init__.py | 124 | Python |
# -----------------------------------------------------------------------------
# Task 002
print('''
Problem 002:
===========
Each new term of the Fibonacci sequence is generated by adding the two
previous terms. Starting with 1 and 2, the first 10 terms are:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
\x1b[33m
Find the sum of all even-valued terms of the Fibonacci sequence
that do not exceed four million.
\x1b[0m
''')
# Approach 1. Mechanical brute force. The only one so far.
def fib_sum(limit):
"""
Считает сумму положительных чисел Фибаначи, которые меньше указанного значения
"""
u1 = 1
u2 = 2
summ = 2
_u = u1 + u2
while _u < limit:
if _u % 2 == 0:
summ += _u
u1 = u2
u2 = _u
_u = u1 + u2
return summ
x = 4000000
result = fib_sum(x)
print("{:,}".format(result))
# generated sequence
# >>> [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987,
# 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025,
# 121393, 196418, 317811, 514229, 832040, 1346269,
# 2178309, 3524578]
| 22.851064 | 82 | 0.554935 | [
"MIT"
] | z1365/euler | t002-fibanatchi.py | 1,383 | Python |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("python/requirements.txt", "r") as fh:
install_requires = fh.read().splitlines()
setuptools.setup(
name="fastquant",
version="0.1.3.16",
author="Lorenzo Ampil",
author_email="lorenzo.ampil@gmail.com",
description="Bringing data driven investments to the mainstream",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/enzoampil/fastquant",
packages=setuptools.find_packages(where="./python", exclude=["docs", "tests"]),
package_dir={"": "python"},
package_data={"fastquant": ["python/data/*"]},
include_package_data=True,
scripts=["python/scripts/get_disclosures", "python/scripts/update_cache"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
],
install_requires=install_requires,
)
| 35.896552 | 83 | 0.684918 | [
"MIT"
] | beatobongco/fastquant | setup.py | 1,041 | Python |
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import requests.exceptions
from urlparse import urlsplit
from collections import deque
import re
def crawl(url):
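    # Breadth-first crawl starting from the given domain, collecting e-mail addresses
    # found in each fetched page and saving them to emails.txt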
new_urls = deque(["http://{}".format(url)])
processed_urls = set()
emails = []
while len(new_urls):
url = new_urls.popleft()
processed_urls.add(url)
parts = urlsplit(url)
base_url = "{0.scheme}://{0.netloc}".format(parts)
path = url[:url.rfind('/') + 1] if '/' in parts.path else url
print "Processing {}".format(url)
try:
response = requests.get(url)
except (requests.exceptions.MissingSchema, \
requests.exceptions.ConnectionError):
continue
new_emails = re.findall(r"\w+[.|\w]\w+@\w+[.]\w+[.|\w+]\w+", \
response.text, re.I)
for addr in new_emails:
emails.append(addr)
soup = BeautifulSoup(response.text)
for anchor in soup.find_all('a'):
link = anchor.attrs["href"] if "href" in anchor.attrs else ''
if link.startswith('/'):
link = base_url + link
elif not link.startswith("http"):
link = path + link
if not link in new_urls and not link in processed_urls:
new_urls.append(link)
with open("emails.txt", 'w') as f:
f.write('\n'.join(emails))
| 30.428571 | 77 | 0.537894 | [
"Unlicense"
] | vesche/snippets | autocapstone/email_crawl.py | 1,491 | Python |
from gensim.models import word2vec
print("Learning Word2Vec embeddings")
tok_file = 'data/preprocessed/lemmatized.txt'
sentences = word2vec.LineSentence(tok_file)
model = word2vec.Word2Vec(sentences=sentences, size=10, window=5, workers=3, min_count=1)
model.wv.save_word2vec_format('models/vejica_word2vec.emb')
print("Saved Word2Vec format") | 38.333333 | 89 | 0.811594 | [
"MIT"
] | gregorkrz/vejice | scripts/experiments/learn_word2vec.py | 345 | Python |
##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
## This class forms the base class for all uis which manipulate PathFilters.
class PathFilterWidget( GafferUI.Widget ) :
def __init__( self, topLevelWidget, pathFilter, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__pathFilter = pathFilter
self.__pathFilterChangedConnection = self.__pathFilter.changedSignal().connect( Gaffer.WeakMethod( self.__pathFilterChanged ) )
## Returns the PathFilter object this UI represents.
def pathFilter( self ) :
return self.__pathFilter
## Must be implemented by subclasses to update the UI when the filter
# changes in some way.
def _updateFromPathFilter( self ) :
raise NotImplementedError
def __pathFilterChanged( self, pathFilter ) :
assert( pathFilter is self.__pathFilter )
self._updateFromPathFilter()
## Creates a PathFilterWidget instance for the specified pathFilter. Returns None
# if no suitable widget exists.
@classmethod
def create( cls, pathFilter ) :
visible = True
with IECore.IgnoredExceptions( KeyError ) :
visible = pathFilter.userData()["UI"]["visible"]
if not visible :
return None
c = pathFilter.__class__
while c is not None :
creator = cls.__typesToCreators.get( c, None )
if creator is not None :
return creator( pathFilter )
c = c.__bases__[0] if c.__bases__ else None
return None
## Registers a subclass of PathFilterWidget to be used with a specific pathFilter type.
@classmethod
def registerType( cls, pathFilterClass, widgetCreationFunction ) :
cls.__typesToCreators[pathFilterClass] = widgetCreationFunction
__typesToCreators = {}
class BasicPathFilterWidget( PathFilterWidget ) :
def __init__( self, pathFilter ) :
self.__checkBox = GafferUI.BoolWidget( str( pathFilter ) )
PathFilterWidget.__init__( self, self.__checkBox, pathFilter )
self.__stateChangedConnection = self.__checkBox.stateChangedSignal().connect( Gaffer.WeakMethod( self.__stateChanged ) )
self._updateFromPathFilter()
def _updateFromPathFilter( self ) :
label = str( self.pathFilter() )
with IECore.IgnoredExceptions( KeyError ) :
label = self.pathFilter().userData()["UI"]["label"]
self.__checkBox.setText( label )
invertEnabled = False
with IECore.IgnoredExceptions( KeyError ) :
invertEnabled = self.pathFilter().userData()["UI"]["invertEnabled"]
self.__checkBox.setState( self.pathFilter().getEnabled() is not invertEnabled )
def __stateChanged( self, checkBox ) :
invertEnabled = False
with IECore.IgnoredExceptions( KeyError ) :
invertEnabled = self.pathFilter().userData()["UI"]["invertEnabled"]
self.pathFilter().setEnabled( checkBox.getState() is not invertEnabled )
PathFilterWidget.registerType( Gaffer.PathFilter, BasicPathFilterWidget )
| 34.787879 | 129 | 0.724303 | [
"BSD-3-Clause"
] | cedriclaunay/gaffer | python/GafferUI/PathFilterWidget.py | 4,592 | Python |
from sympy.core.backend import sin, cos, tan, pi, symbols, Matrix, zeros, S
from sympy.physics.mechanics import (Particle, Point, ReferenceFrame,
RigidBody, Vector)
from sympy.physics.mechanics import (angular_momentum, dynamicsymbols,
inertia, inertia_of_point_mass,
kinetic_energy, linear_momentum,
outer, potential_energy, msubs,
find_dynamicsymbols, Lagrangian)
from sympy.physics.mechanics.functions import gravity, center_of_mass
from sympy.physics.vector.vector import Vector
from sympy.utilities.pytest import raises
Vector.simp = True
q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
def test_inertia():
N = ReferenceFrame('N')
ixx, iyy, izz = symbols('ixx iyy izz')
ixy, iyz, izx = symbols('ixy iyz izx')
assert inertia(N, ixx, iyy, izz) == (ixx * (N.x | N.x) + iyy *
(N.y | N.y) + izz * (N.z | N.z))
assert inertia(N, 0, 0, 0) == 0 * (N.x | N.x)
raises(TypeError, lambda: inertia(0, 0, 0, 0))
assert inertia(N, ixx, iyy, izz, ixy, iyz, izx) == (ixx * (N.x | N.x) +
ixy * (N.x | N.y) + izx * (N.x | N.z) + ixy * (N.y | N.x) + iyy *
(N.y | N.y) + iyz * (N.y | N.z) + izx * (N.z | N.x) + iyz * (N.z |
N.y) + izz * (N.z | N.z))
def test_inertia_of_point_mass():
r, s, t, m = symbols('r s t m')
N = ReferenceFrame('N')
px = r * N.x
I = inertia_of_point_mass(m, px, N)
assert I == m * r**2 * (N.y | N.y) + m * r**2 * (N.z | N.z)
py = s * N.y
I = inertia_of_point_mass(m, py, N)
assert I == m * s**2 * (N.x | N.x) + m * s**2 * (N.z | N.z)
pz = t * N.z
I = inertia_of_point_mass(m, pz, N)
assert I == m * t**2 * (N.x | N.x) + m * t**2 * (N.y | N.y)
p = px + py + pz
I = inertia_of_point_mass(m, p, N)
assert I == (m * (s**2 + t**2) * (N.x | N.x) -
m * r * s * (N.x | N.y) -
m * r * t * (N.x | N.z) -
m * r * s * (N.y | N.x) +
m * (r**2 + t**2) * (N.y | N.y) -
m * s * t * (N.y | N.z) -
m * r * t * (N.z | N.x) -
m * s * t * (N.z | N.y) +
m * (r**2 + s**2) * (N.z | N.z))
def test_linear_momentum():
N = ReferenceFrame('N')
Ac = Point('Ac')
Ac.set_vel(N, 25 * N.y)
I = outer(N.x, N.x)
A = RigidBody('A', Ac, N, 20, (I, Ac))
P = Point('P')
Pa = Particle('Pa', P, 1)
Pa.point.set_vel(N, 10 * N.x)
raises(TypeError, lambda: linear_momentum(A, A, Pa))
raises(TypeError, lambda: linear_momentum(N, N, Pa))
assert linear_momentum(N, A, Pa) == 10 * N.x + 500 * N.y
def test_angular_momentum_and_linear_momentum():
"""A rod with length 2l, centroidal inertia I, and mass M along with a
particle of mass m fixed to the end of the rod rotate with an angular rate
of omega about point O which is fixed to the non-particle end of the rod.
The rod's reference frame is A and the inertial frame is N."""
m, M, l, I = symbols('m, M, l, I')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
a = ReferenceFrame('a')
O = Point('O')
Ac = O.locatenew('Ac', l * N.x)
P = Ac.locatenew('P', l * N.x)
O.set_vel(N, 0 * N.x)
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
A = RigidBody('A', Ac, a, M, (I * outer(N.z, N.z), Ac))
expected = 2 * m * omega * l * N.y + M * l * omega * N.y
assert linear_momentum(N, A, Pa) == expected
raises(TypeError, lambda: angular_momentum(N, N, A, Pa))
raises(TypeError, lambda: angular_momentum(O, O, A, Pa))
raises(TypeError, lambda: angular_momentum(O, N, O, Pa))
expected = (I + M * l**2 + 4 * m * l**2) * omega * N.z
assert angular_momentum(O, N, A, Pa) == expected
def test_kinetic_energy():
m, M, l1 = symbols('m M l1')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
raises(TypeError, lambda: kinetic_energy(Pa, Pa, A))
raises(TypeError, lambda: kinetic_energy(N, N, A))
assert 0 == (kinetic_energy(N, Pa, A) - (M*l1**2*omega**2/2
+ 2*l1**2*m*omega**2 + omega**2/2)).expand()
def test_potential_energy():
m, M, l1, g, h, H = symbols('m M l1 g h H')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
Pa.potential_energy = m * g * h
A.potential_energy = M * g * H
assert potential_energy(A, Pa) == m * g * h + M * g * H
def test_Lagrangian():
M, m, g, h = symbols('M m g h')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
P = O.locatenew('P', 1 * N.x)
P.set_vel(N, 10 * N.x)
Pa = Particle('Pa', P, 1)
Ac = O.locatenew('Ac', 2 * N.y)
Ac.set_vel(N, 5 * N.y)
a = ReferenceFrame('a')
a.set_ang_vel(N, 10 * N.z)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, 20, (I, Ac))
Pa.potential_energy = m * g * h
A.potential_energy = M * g * h
raises(TypeError, lambda: Lagrangian(A, A, Pa))
raises(TypeError, lambda: Lagrangian(N, N, Pa))
def test_msubs():
a, b = symbols('a, b')
x, y, z = dynamicsymbols('x, y, z')
# Test simple substitution
expr = Matrix([[a*x + b, x*y.diff() + y],
[x.diff().diff(), z + sin(z.diff())]])
sol = Matrix([[a + b, y],
[x.diff().diff(), 1]])
sd = {x: 1, z: 1, z.diff(): 0, y.diff(): 0}
assert msubs(expr, sd) == sol
# Test smart substitution
expr = cos(x + y)*tan(x + y) + b*x.diff()
sd = {x: 0, y: pi/2, x.diff(): 1}
assert msubs(expr, sd, smart=True) == b + 1
N = ReferenceFrame('N')
v = x*N.x + y*N.y
d = x*(N.x|N.x) + y*(N.y|N.y)
v_sol = 1*N.y
d_sol = 1*(N.y|N.y)
sd = {x: 0, y: 1}
assert msubs(v, sd) == v_sol
assert msubs(d, sd) == d_sol
def test_find_dynamicsymbols():
a, b = symbols('a, b')
x, y, z = dynamicsymbols('x, y, z')
expr = Matrix([[a*x + b, x*y.diff() + y],
[x.diff().diff(), z + sin(z.diff())]])
# Test finding all dynamicsymbols
sol = {x, y.diff(), y, x.diff().diff(), z, z.diff()}
assert find_dynamicsymbols(expr) == sol
# Test finding all but those in sym_list
exclude_list = [x, y, z]
sol = {y.diff(), x.diff().diff(), z.diff()}
assert find_dynamicsymbols(expr, exclude=exclude_list) == sol
# Test finding all dynamicsymbols in a vector with a given reference frame
d, e, f = dynamicsymbols('d, e, f')
A = ReferenceFrame('A')
v = d * A.x + e * A.y + f * A.z
sol = {d, e, f}
assert find_dynamicsymbols(v, reference_frame=A) == sol
# Test if a ValueError is raised on supplying only a vector as input
raises(ValueError, lambda: find_dynamicsymbols(v))
def test_gravity():
N = ReferenceFrame('N')
m, M, g = symbols('m M g')
F1, F2 = dynamicsymbols('F1 F2')
po = Point('po')
pa = Particle('pa', po, m)
A = ReferenceFrame('A')
P = Point('P')
I = outer(A.x, A.x)
B = RigidBody('B', P, A, M, (I, P))
forceList = [(po, F1), (P, F2)]
forceList.extend(gravity(g*N.y, pa, B))
l = [(po, F1), (P, F2), (po, g*m*N.y), (P, g*M*N.y)]
for i in range(len(l)):
for j in range(len(l[i])):
assert forceList[i][j] == l[i][j]
# This function tests the center_of_mass() function
# that was added in PR #14758 to compute the center of
# mass of a system of bodies.
def test_center_of_mass():
a = ReferenceFrame('a')
m = symbols('m', real=True)
p1 = Particle('p1', Point('p1_pt'), S(1))
p2 = Particle('p2', Point('p2_pt'), S(2))
p3 = Particle('p3', Point('p3_pt'), S(3))
p4 = Particle('p4', Point('p4_pt'), m)
b_f = ReferenceFrame('b_f')
b_cm = Point('b_cm')
mb = symbols('mb')
b = RigidBody('b', b_cm, b_f, mb, (outer(b_f.x, b_f.x), b_cm))
p2.point.set_pos(p1.point, a.x)
p3.point.set_pos(p1.point, a.x + a.y)
p4.point.set_pos(p1.point, a.y)
b.masscenter.set_pos(p1.point, a.y + a.z)
point_o=Point('o')
point_o.set_pos(p1.point, center_of_mass(p1.point, p1, p2, p3, p4, b))
expr = 5/(m + mb + 6)*a.x + (m + mb + 3)/(m + mb + 6)*a.y + mb/(m + mb + 6)*a.z
assert point_o.pos_from(p1.point)-expr == 0
| 36.035573 | 83 | 0.530767 | [
"BSD-3-Clause"
] | Abhi58/sympy | sympy/physics/mechanics/tests/test_functions.py | 9,117 | Python |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def Canny(img):
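    # Canny pipeline: grayscale -> Gaussian smoothing -> Sobel gradients -> edge angle
    # quantization -> non-maximum suppression -> hysteresis thresholding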
# Gray scale
def BGR2GRAY(img):
b = img[:, :, 0].copy()
g = img[:, :, 1].copy()
r = img[:, :, 2].copy()
# Gray scale
out = 0.2126 * r + 0.7152 * g + 0.0722 * b
out = out.astype(np.uint8)
return out
# Gaussian filter for grayscale
def gaussian_filter(img, K_size=3, sigma=1.3):
if len(img.shape) == 3:
H, W, C = img.shape
gray = False
else:
img = np.expand_dims(img, axis=-1)
H, W, C = img.shape
gray = True
## Zero padding
pad = K_size // 2
out = np.zeros([H + pad * 2, W + pad * 2, C], dtype=np.float)
out[pad : pad + H, pad : pad + W] = img.copy().astype(np.float)
## prepare Kernel
K = np.zeros((K_size, K_size), dtype=np.float)
for x in range(-pad, -pad + K_size):
for y in range(-pad, -pad + K_size):
K[y + pad, x + pad] = np.exp( - (x ** 2 + y ** 2) / (2 * sigma * sigma))
#K /= (sigma * np.sqrt(2 * np.pi))
K /= (2 * np.pi * sigma * sigma)
K /= K.sum()
tmp = out.copy()
# filtering
for y in range(H):
for x in range(W):
for c in range(C):
out[pad + y, pad + x, c] = np.sum(K * tmp[y : y + K_size, x : x + K_size, c])
out = np.clip(out, 0, 255)
out = out[pad : pad + H, pad : pad + W]
out = out.astype(np.uint8)
if gray:
out = out[..., 0]
return out
# sobel filter
def sobel_filter(img, K_size=3):
if len(img.shape) == 3:
H, W, C = img.shape
else:
H, W = img.shape
# Zero padding
pad = K_size // 2
out = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float)
out[pad : pad + H, pad : pad + W] = img.copy().astype(np.float)
tmp = out.copy()
out_v = out.copy()
out_h = out.copy()
## Sobel vertical
Kv = [[1., 2., 1.],[0., 0., 0.], [-1., -2., -1.]]
## Sobel horizontal
Kh = [[1., 0., -1.],[2., 0., -2.],[1., 0., -1.]]
# filtering
for y in range(H):
for x in range(W):
out_v[pad + y, pad + x] = np.sum(Kv * (tmp[y : y + K_size, x : x + K_size]))
out_h[pad + y, pad + x] = np.sum(Kh * (tmp[y : y + K_size, x : x + K_size]))
out_v = np.clip(out_v, 0, 255)
out_h = np.clip(out_h, 0, 255)
out_v = out_v[pad : pad + H, pad : pad + W]
out_v = out_v.astype(np.uint8)
out_h = out_h[pad : pad + H, pad : pad + W]
out_h = out_h.astype(np.uint8)
return out_v, out_h
def get_edge_angle(fx, fy):
# get edge strength
edge = np.sqrt(np.power(fx.astype(np.float32), 2) + np.power(fy.astype(np.float32), 2))
edge = np.clip(edge, 0, 255)
fx = np.maximum(fx, 1e-10)
#fx[np.abs(fx) <= 1e-5] = 1e-5
# get edge angle
angle = np.arctan(fy / fx)
return edge, angle
def angle_quantization(angle):
angle = angle / np.pi * 180
angle[angle < -22.5] = 180 + angle[angle < -22.5]
_angle = np.zeros_like(angle, dtype=np.uint8)
_angle[np.where(angle <= 22.5)] = 0
_angle[np.where((angle > 22.5) & (angle <= 67.5))] = 45
_angle[np.where((angle > 67.5) & (angle <= 112.5))] = 90
_angle[np.where((angle > 112.5) & (angle <= 157.5))] = 135
return _angle
def non_maximum_suppression(angle, edge):
H, W = angle.shape
_edge = edge.copy()
for y in range(H):
for x in range(W):
if angle[y, x] == 0:
dx1, dy1, dx2, dy2 = -1, 0, 1, 0
elif angle[y, x] == 45:
dx1, dy1, dx2, dy2 = -1, 1, 1, -1
elif angle[y, x] == 90:
dx1, dy1, dx2, dy2 = 0, -1, 0, 1
elif angle[y, x] == 135:
dx1, dy1, dx2, dy2 = -1, -1, 1, 1
if x == 0:
dx1 = max(dx1, 0)
dx2 = max(dx2, 0)
if x == W-1:
dx1 = min(dx1, 0)
dx2 = min(dx2, 0)
if y == 0:
dy1 = max(dy1, 0)
dy2 = max(dy2, 0)
if y == H-1:
dy1 = min(dy1, 0)
dy2 = min(dy2, 0)
if max(max(edge[y, x], edge[y + dy1, x + dx1]), edge[y + dy2, x + dx2]) != edge[y, x]:
_edge[y, x] = 0
return _edge
    def hysteresis(edge, HT=100, LT=30):
        H, W = edge.shape
        # Hysteresis threshold
edge[edge >= HT] = 255
edge[edge <= LT] = 0
_edge = np.zeros((H + 2, W + 2), dtype=np.float32)
_edge[1 : H + 1, 1 : W + 1] = edge
## 8 - Nearest neighbor
nn = np.array(((1., 1., 1.), (1., 0., 1.), (1., 1., 1.)), dtype=np.float32)
for y in range(1, H+2):
for x in range(1, W+2):
if _edge[y, x] < LT or _edge[y, x] > HT:
continue
if np.max(_edge[y-1:y+2, x-1:x+2] * nn) >= HT:
_edge[y, x] = 255
else:
_edge[y, x] = 0
edge = _edge[1:H+1, 1:W+1]
return edge
# grayscale
gray = BGR2GRAY(img)
# gaussian filtering
gaussian = gaussian_filter(gray, K_size=5, sigma=1.4)
# sobel filtering
fy, fx = sobel_filter(gaussian, K_size=3)
# get edge strength, angle
edge, angle = get_edge_angle(fx, fy)
# angle quantization
angle = angle_quantization(angle)
# non maximum suppression
edge = non_maximum_suppression(angle, edge)
    # hysteresis threshold
    out = hysteresis(edge, 100, 30)
return out
def Hough_Line_step1(edge):
## Voting
def voting(edge):
H, W = edge.shape
drho = 1
dtheta = 1
# get rho max length
rho_max = np.ceil(np.sqrt(H ** 2 + W ** 2)).astype(np.int)
# hough table
hough = np.zeros((rho_max * 2, 180), dtype=np.int)
# get index of edge
ind = np.where(edge == 255)
## hough transformation
for y, x in zip(ind[0], ind[1]):
for theta in range(0, 180, dtheta):
                # get polar coordinates
t = np.pi / 180 * theta
rho = int(x * np.cos(t) + y * np.sin(t))
# vote
hough[rho + rho_max, theta] += 1
out = hough.astype(np.uint8)
return out
# voting
out = voting(edge)
return out
# Read image
img = cv2.imread("thorino.jpg").astype(np.float32)
# Canny
edge = Canny(img)
# Hough
out = Hough_Line_step1(edge)
out = out.astype(np.uint8)
# Save result
#cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 22.419231 | 91 | 0.553954 | [
"MIT"
] | lusi1990/ImageProcessing100Wen | Question_41_50/answers_py/answer_44.py | 5,829 | Python |
"""
timedelta support tools
"""
import numpy as np
from pandas._libs.tslibs import NaT
from pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.arrays.timedeltas import sequence_to_td64ns
def to_timedelta(arg, unit=None, errors="raise"):
"""
Convert argument to timedelta.
Timedeltas are absolute differences in times, expressed in difference
units (e.g. days, hours, minutes, seconds). This method converts
an argument from a recognized timedelta format / value into
a Timedelta type.
Parameters
----------
arg : str, timedelta, list-like or Series
The data to be converted to timedelta. The character M by itself,
e.g. '1M', is treated as minute, not month. The characters Y and y
are treated as the mean length of the Gregorian calendar year -
365.2425 days or 365 days 5 hours 49 minutes 12 seconds.
unit : str, optional
Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.
Possible values:
* 'W'
* 'D' / 'days' / 'day'
* 'hours' / 'hour' / 'hr' / 'h'
* 'm' / 'minute' / 'min' / 'minutes' / 'T'
* 'S' / 'seconds' / 'sec' / 'second'
* 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'
* 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'
* 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'
.. versionchanged:: 1.1.0
           Must not be specified when `arg` contains strings and
``errors="raise"``.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaT.
- If 'ignore', then invalid parsing will return the input.
Returns
-------
timedelta64 or numpy.array of timedelta64
Output type returned if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
convert_dtypes : Convert dtypes.
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015500')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
'0 days 00:00:03', '0 days 00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
"""
if unit is not None:
unit = parse_timedelta_unit(unit)
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'}")
if unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, unit=unit, errors=errors)
return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name)
elif isinstance(arg, np.ndarray) and arg.ndim == 0:
# extract array scalar and process below
arg = arg.item()
elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1:
return _convert_listlike(arg, unit=unit, errors=errors)
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, timedelta, list, tuple, 1-d array, or Series"
)
if isinstance(arg, str) and unit is not None:
raise ValueError("unit must not be specified if the input is/contains a str")
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)
def _coerce_scalar_to_timedelta_type(r, unit="ns", errors="raise"):
"""Convert string 'r' to a timedelta object."""
try:
result = Timedelta(r, unit)
except ValueError:
if errors == "raise":
raise
elif errors == "ignore":
return r
# coerce
result = NaT
return result
def _convert_listlike(arg, unit=None, errors="raise", name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, "dtype"):
# This is needed only to ensure that in the case where we end up
# returning arg (errors == "ignore"), and where the input is a
# generator, we return a useful list-like instead of a
# used-up generator
arg = np.array(list(arg), dtype=object)
try:
value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]
except ValueError:
if errors == "ignore":
return arg
else:
# This else-block accounts for the cases when errors='raise'
# and errors='coerce'. If errors == 'raise', these errors
# should be raised. If errors == 'coerce', we shouldn't
# expect any errors to be raised, since all parsing errors
# cause coercion to pd.NaT. However, if an error / bug is
# introduced that causes an Exception to be raised, we would
# like to surface it.
raise
from pandas import TimedeltaIndex
value = TimedeltaIndex(value, unit="ns", name=name)
return value
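# A rough illustration of the `errors` modes (assumed behaviour; exact reprs
# may differ slightly across pandas versions):
#
#   >>> import pandas as pd
#   >>> pd.to_timedelta(["1 days", "not a delta"], errors="coerce")
#   TimedeltaIndex(['1 days', NaT], dtype='timedelta64[ns]', freq=None)
#
# With errors="raise" the invalid element raises a ValueError instead, and
# with errors="ignore" the original input is handed back unchanged.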
| 36.401163 | 85 | 0.615557 | [
"BSD-3-Clause"
] | cdeil/pandas | pandas/core/tools/timedeltas.py | 6,261 | Python |
from functools import reduce
from itertools import chain
from typing import Optional, Set
import pandas as pd
from sqlalchemy import (
func,
or_,
orm,
sql,
)
import fiber
from fiber.condition.base import _BaseCondition
from fiber.database import (
compile_sqla,
read_with_progress,
)
from fiber.database import get_engine
from fiber.database.table import Table
def _case_insensitive_like(column: str, value):
return func.upper(column).like(value.upper())
def _multi_like_clause(column: str, value_or_values):
values = (
[value_or_values]
if isinstance(value_or_values, str)
else value_or_values
)
return reduce(
or_,
[_case_insensitive_like(column, value) for value in values]
)
class _DatabaseCondition(_BaseCondition):
"""
The DatabaseCondition adds functionality to the BaseCondition which
    is needed to run queries against a database. It also allows combining
    SQL statements into one to optimize performance. It should only be used by
developers and not by end-users. It builds the basis for specific
conditions like Diagnosis, VitalSign, ...
It should be possible to use this for other databases that use MRNs by
    adjusting the engine. One problem to look into is that database
    conditions of different DBs are only combined as BaseConditions in
    ``__and__`` and ``__or__``.
"""
engine = get_engine()
def __init__(
self,
mrns: Optional[Set[str]] = None,
dimensions: Optional[Set[str]] = None,
clause=None,
data_columns=None,
**kwargs
):
"""
Args:
mrns: Set of MRN-Strings for which the condition is true.
children: List of child conditions which were combined with an
operator.
operator: String representing the combination of the child
condition (e.g. ``_BaseCondition.AND``)
dimensions: A set of tables that need to be joined on the
``base_table``
clause: The SQLAlchemy clause of the current
condition to select patients.
data_columns: The SQLAlchemy data_columns that should
be returned when ``.get_data()`` is called.
"""
super().__init__(**kwargs)
self.dimensions = dimensions or set()
# sql.true() acts as an 'empty' initializer for the clause
self._clause = sql.true() if clause is None else clause
self.data_columns = data_columns or []
@property
def base_table(self) -> Table:
"""
Must be set by subclasses to the database table which the class uses to
select patients and data. This is also used to optimize queries on the
same table.
"""
raise NotImplementedError
@property
def _default_columns(self):
"""
Must be set by subclasses.
This should return an array of columns which are in the result table of
``._create_query()``. These columns will be returned by default when
``.get_data()`` is called.
"""
raise NotImplementedError
@property
def mrn_column(self):
"""
Must be set by subclasses.
This is used to specify the column in the result table of
``._create_query()`` which is holding the MRNs.
"""
raise NotImplementedError
@property
def age_column(self):
"""
Must be set by subclasses.
This is used to specify the column in the result table of
``._create_query()`` which is holding the age in days.
"""
raise NotImplementedError
@property
def data_columns(self):
"""
Returns columns which are in the result table of
``._create_query()``. These columns will be returned when
``.get_data()`` is called.
"""
return [
str(col) for col
in (self._specified_columns or self._default_columns)
]
@data_columns.setter
def data_columns(self, value):
self._specified_columns = value
@property
def clause(self):
"""
Returns the clause of the current condition or runs
``._create_clause()`` to create it.
"""
# TODO recursively create clause of combinable conditions
if not isinstance(self._clause, sql.elements.True_):
return self._clause
else:
return self._create_clause()
def _create_clause(self):
"""
Should be overwritten by subclasses to create a SQLAlchemy clause based
on the defined condition. It is used to select the correct patients.
"""
return sql.true()
def _create_query(self) -> orm.Query:
"""
Must be implemented by subclasses to return an instance of a SQLAlchemy
query which only returns MRNs.
This query should yield all medical record numbers in the
``base_table`` of the condition. It uses the ``.clause`` to select
the relevant patients.
This query is also used by other function which change the selected
columns to get data about the patients.
"""
raise NotImplementedError
def _fetch_mrns(self,
limit: Optional[int] = None):
"""Fetches MRNs from the results of ``._create_query()``."""
q = self._create_query()
if limit:
q = q.limit(limit)
mrn_df = read_with_progress(q.statement, self.engine)
if mrn_df.empty:
mrn_df = pd.DataFrame(columns=['medical_record_number'])
assert len(mrn_df.columns) == 1, '_create_query must return only MRNs'
result = set(
mrn for mrn in
mrn_df.iloc[:, 0]
)
return result
def _fetch_data(self,
included_mrns: Optional[Set] = None,
limit: Optional[int] = None):
"""
Fetches the data defined with ``.data_columns`` for each patient
defined by this condition and via ``included_mrns`` from the results of
``._create_query()``.
"""
q = self._create_query()
if included_mrns:
q = q.filter(self.mrn_column.in_(included_mrns))
if limit:
q = q.limit(limit)
q = q.with_entities(*self.data_columns).distinct()
result = read_with_progress(
q.statement, self.engine, silent=bool(included_mrns))
return result
def example_values(self):
"""
Returns ten values of the current condition.
Example:
>>> Patient(gender='Female', religion='Hindu').example_values()
"""
return self.get_data(limit=10)
def values_per(self, *columns: Set[str]):
"""
        Counts occurrences of unique values in the specified columns.
"""
return self._grouped_count('*', *columns, label='values')
def patients_per(self, *columns: Set[str]):
"""
Counts distinct patients for unique values in the specified columns.
"""
return self._grouped_count(
self.mrn_column.distinct(),
*columns,
label='patients'
)
def _grouped_count(self,
count_column: str,
*columns: Set[str],
label: Optional[str] = None):
if not columns:
raise ValueError('Supply one or multiple columns as arguments.')
q = self._create_query()
q = q.group_by(
*columns
).with_entities(
*columns
).order_by(
func.count(count_column).label((label or 'count')).desc()
)
return read_with_progress(q.statement, self.engine)
def distinct(self, *columns: Set[str]):
"""Returns distinct values based on the specified ``columns``"""
if not columns:
raise ValueError('Supply one or multiple columns as arguments.')
q = self._create_query()
q = q.with_entities(*columns).distinct()
return read_with_progress(q.statement, self.engine)
def to_dict(self):
obj_dict = super().to_dict()
if self._specified_columns:
obj_dict['data_columns'] = self.data_columns
return obj_dict
@classmethod
def from_dict(cls: _BaseCondition, obj_dict: dict):
obj = super().from_dict(obj_dict)
if 'data_columns' in obj_dict:
obj.data_columns = obj_dict['data_columns']
return obj
def __or__(self, other: _BaseCondition):
"""
The _DatabaseCondition optimizes the SQL statements for ``|`` by
        combining the clauses of conditions which run on the same database
table. This is done via the ``.base_table`` attribute.
"""
if (
self.base_table == other.base_table
and not (self._mrns or other._mrns)
):
unique_columns = list(dict.fromkeys(
chain(self.data_columns, other.data_columns)
))
return self.__class__(
dimensions=self.dimensions | other.dimensions,
clause=self.clause | other.clause,
data_columns=unique_columns,
children=[self, other],
operator=_BaseCondition.OR,
)
else:
return _BaseCondition(
mrns=self.get_mrns() | other.get_mrns(),
children=[self, other],
operator=_BaseCondition.OR,
)
def __and__(self, other: _BaseCondition):
# The SQL queries could theoretically be combined for AND as well, by
# running them as subqueries and joining on the MRNs
return self.__class__(
mrns=self.get_mrns() & other.get_mrns(),
dimensions=self.dimensions | other.dimensions,
children=[self, other],
operator=_BaseCondition.AND,
)
def __repr__(self):
"""Shows the running query or the resulting MRNs"""
if self._mrns:
return f'{self.__class__.__name__}: {len(self.get_mrns())} mrns'
else:
clause = (
compile_sqla(self.clause, self.engine) if fiber.config.VERBOSE
else '...'
)
return (
f'{self.__class__.__name__} '
f'({clause})'
)
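# A hypothetical usage sketch; concrete condition classes such as Patient or
# Diagnosis live elsewhere in fiber and are only assumed here:
#
#   females = Patient(gender='Female')
#   hindus = Patient(religion='Hindu')
#   # Same base_table and no cached MRNs: the two clauses are OR-ed into a
#   # single SQL query instead of fetching MRNs twice.
#   either = females | hindus
#   # AND always intersects the MRN sets fetched by each side.
#   both = females & hindus
#   both.get_data(limit=10)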
| 32.165138 | 79 | 0.595455 | [
"MIT"
] | hpi-dhc/fiber | fiber/condition/database.py | 10,518 | Python |
#
# PySNMP MIB module PAN-ENTITY-EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///mnt/d/data/MIBS/text_mibs/paloalto/PAN-ENTITY-EXT-MIB
# Produced by pysmi-0.3.4 at Wed Feb 10 13:07:35 2021
# On host QS-IL-COSTAY platform Linux version 5.4.72-microsoft-standard-WSL2 by user coye
# Using Python version 3.8.5 (default, Jul 28 2020, 12:59:40)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
panModules, = mibBuilder.importSymbols("PAN-GLOBAL-REG", "panModules")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, ModuleIdentity, Integer32, Bits, TimeTicks, IpAddress, ObjectIdentity, Gauge32, NotificationType, Unsigned32, Counter64, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "ModuleIdentity", "Integer32", "Bits", "TimeTicks", "IpAddress", "ObjectIdentity", "Gauge32", "NotificationType", "Unsigned32", "Counter64", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
panEntityMIBModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7))
panEntityMIBModule.setRevisions(('2012-11-05 11:06',))
if mibBuilder.loadTexts: panEntityMIBModule.setLastUpdated('201211051106Z')
if mibBuilder.loadTexts: panEntityMIBModule.setOrganization('Palo Alto Networks')
panEntityMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1))
panEntityMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2))
panEntityChassisGroup = ObjectIdentity((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 1))
if mibBuilder.loadTexts: panEntityChassisGroup.setStatus('current')
panEntityFRUModuleGroup = ObjectIdentity((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 2))
if mibBuilder.loadTexts: panEntityFRUModuleGroup.setStatus('current')
panEntityFanTrayGroup = ObjectIdentity((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 3))
if mibBuilder.loadTexts: panEntityFanTrayGroup.setStatus('current')
panEntityPowerSupplyGroup = ObjectIdentity((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 4))
if mibBuilder.loadTexts: panEntityPowerSupplyGroup.setStatus('current')
panEntityTotalPowerAvail = MibScalar((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: panEntityTotalPowerAvail.setStatus('current')
panEntityTotalPowerUsed = MibScalar((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: panEntityTotalPowerUsed.setStatus('current')
panEntityFRUModuleTable = MibTable((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 2, 1), )
if mibBuilder.loadTexts: panEntityFRUModuleTable.setStatus('current')
panEntityFRUModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 2, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: panEntityFRUModuleEntry.setStatus('current')
panEntryFRUModulePowerUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: panEntryFRUModulePowerUsed.setStatus('current')
panEntryFRUModuleNumPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: panEntryFRUModuleNumPorts.setStatus('current')
panEntityFanTrayTable = MibTable((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 3, 1), )
if mibBuilder.loadTexts: panEntityFanTrayTable.setStatus('current')
panEntityFanTrayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 3, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: panEntityFanTrayEntry.setStatus('current')
panEntryFanTrayPowerUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: panEntryFanTrayPowerUsed.setStatus('current')
panEntityPowerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 4, 1), )
if mibBuilder.loadTexts: panEntityPowerSupplyTable.setStatus('current')
panEntityPowerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 4, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: panEntityPowerSupplyEntry.setStatus('current')
panEntryPowerSupplyPowerCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 1, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: panEntryPowerSupplyPowerCapacity.setStatus('current')
panEntityMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2, 1))
panEntityMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2, 2))
panEntityMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2, 1, 1)).setObjects(("PAN-ENTITY-EXT-MIB", "panEntityMIBChassisGroup"), ("PAN-ENTITY-EXT-MIB", "panEntityMIBFRUModuleGroup"), ("PAN-ENTITY-EXT-MIB", "panEntityMIBFanTrayGroup"), ("PAN-ENTITY-EXT-MIB", "panEntityMIBPowerSupplyGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
panEntityMIBCompliance = panEntityMIBCompliance.setStatus('current')
panEntityMIBChassisGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2, 2, 1)).setObjects(("PAN-ENTITY-EXT-MIB", "panEntityTotalPowerAvail"), ("PAN-ENTITY-EXT-MIB", "panEntityTotalPowerUsed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
panEntityMIBChassisGroup = panEntityMIBChassisGroup.setStatus('current')
panEntityMIBFRUModuleGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2, 2, 2)).setObjects(("PAN-ENTITY-EXT-MIB", "panEntryFRUModulePowerUsed"), ("PAN-ENTITY-EXT-MIB", "panEntryFRUModuleNumPorts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
panEntityMIBFRUModuleGroup = panEntityMIBFRUModuleGroup.setStatus('current')
panEntityMIBFanTrayGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2, 2, 3)).setObjects(("PAN-ENTITY-EXT-MIB", "panEntryFanTrayPowerUsed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
panEntityMIBFanTrayGroup = panEntityMIBFanTrayGroup.setStatus('current')
panEntityMIBPowerSupplyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25461, 1, 1, 7, 2, 2, 4)).setObjects(("PAN-ENTITY-EXT-MIB", "panEntryPowerSupplyPowerCapacity"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
panEntityMIBPowerSupplyGroup = panEntityMIBPowerSupplyGroup.setStatus('current')
mibBuilder.exportSymbols("PAN-ENTITY-EXT-MIB", panEntityMIBPowerSupplyGroup=panEntityMIBPowerSupplyGroup, panEntityMIBConformance=panEntityMIBConformance, panEntityPowerSupplyEntry=panEntityPowerSupplyEntry, panEntityPowerSupplyGroup=panEntityPowerSupplyGroup, panEntityTotalPowerAvail=panEntityTotalPowerAvail, panEntityMIBCompliances=panEntityMIBCompliances, panEntryFRUModuleNumPorts=panEntryFRUModuleNumPorts, panEntityFanTrayGroup=panEntityFanTrayGroup, panEntryFanTrayPowerUsed=panEntryFanTrayPowerUsed, panEntryPowerSupplyPowerCapacity=panEntryPowerSupplyPowerCapacity, panEntityFanTrayEntry=panEntityFanTrayEntry, panEntityMIBFanTrayGroup=panEntityMIBFanTrayGroup, panEntityFanTrayTable=panEntityFanTrayTable, panEntityMIBCompliance=panEntityMIBCompliance, panEntityFRUModuleEntry=panEntityFRUModuleEntry, panEntityMIBFRUModuleGroup=panEntityMIBFRUModuleGroup, panEntityMIBModule=panEntityMIBModule, panEntityMIBChassisGroup=panEntityMIBChassisGroup, panEntityMIBObjects=panEntityMIBObjects, panEntityFRUModuleGroup=panEntityFRUModuleGroup, panEntityFRUModuleTable=panEntityFRUModuleTable, panEntityTotalPowerUsed=panEntityTotalPowerUsed, PYSNMP_MODULE_ID=panEntityMIBModule, panEntityChassisGroup=panEntityChassisGroup, panEntityPowerSupplyTable=panEntityPowerSupplyTable, panEntryFRUModulePowerUsed=panEntryFRUModulePowerUsed, panEntityMIBGroups=panEntityMIBGroups)
| 113.616438 | 1,375 | 0.769472 | [
"Apache-2.0"
] | QualiSystems/cloudshell-firewall-panos | cloudshell/firewall/paloalto/panos/mibs/PAN-ENTITY-EXT-MIB.py | 8,294 | Python |
from setuptools import setup, find_packages
# declare these here since we use them in multiple places
_tests_require = [
'pytest',
'pytest-cov',
'flake8',
]
setup(
# package info
name='cheapskate_bal',
description='Cheapskate labs single/dual plane balancer',
version='0.0.2',
url='http://your/url/here',
author='Kevin Powell',
author_email='kevin@kevinpowell.guru',
packages=find_packages(exclude=['tests', 'tests.*']),
# scripts to install to usr/bin
entry_points={
'console_scripts': [
'csbal=cheapskate_bal.cli:csbal_process',
'csbal_s=cheapskate_bal.cli:csbal_single',
'csbal_dinit=cheapskate_bal.cli:csbal_dual_init',
'csbal_d=cheapskate_bal.cli:csbal_dual_iter'
]
},
# run time requirements
# exact versions are in the requirements.txt file
install_requires=[],
# need this for setup.py test
setup_requires=[
'pytest-runner',
],
# needs this if using setuptools_scm
# use_scm_version=True,
# test dependencies
tests_require=_tests_require,
extras_require={
# this allows us to pip install .[test] for all test dependencies
'test': _tests_require,
}
)
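# Rough usage note (assuming standard setuptools behaviour): installing the
# package with `pip install .` (or `pip install .[test]` to pull in the test
# extras) puts the four console scripts declared above on PATH:
# csbal, csbal_s, csbal_dinit and csbal_d. Their arguments are defined in
# cheapskate_bal.cli and are not documented here.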
| 23.943396 | 73 | 0.643814 | [
"Unlicense"
] | kevinpowell/balancer | cheapskate_bal/setup.py | 1,269 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Run this script to train a QR-DQN agent in the selected environment
"""
from cnn_deepmind import CNNDeepmind_Multihead
from eqrdqn import QRDQN
from atari_wrappers import make_atari, wrap_deepmind
import pickle
import numpy as np
import matplotlib.pyplot as plt
env = make_atari("BreakoutNoFrameskip-v4",noop=True)
env = wrap_deepmind(env, episode_life=True)
nb_steps = 12500000
agent = QRDQN( env,
CNNDeepmind_Multihead,
n_quantiles=200,
kappa=0,
prior = 0.0001,
replay_start_size=50000,
replay_buffer_size=1000000,
initial_exploration_rate=1,
final_exploration_rate=0.01,
final_exploration_step=1000000,
gamma=0.99,
update_target_frequency=10000,
minibatch_size=32,
learning_rate=5e-5,
adam_epsilon=0.01/32,
update_frequency=4,
logging=True)
agent.learn(timesteps=nb_steps, verbose=True)
agent.save()
| 27.634146 | 67 | 0.60812 | [
"MIT"
] | uncharted-technologies/risk-and-uncertainty | Archive/main/Atari/train_atari.py | 1,133 | Python |
from ..commands.help import HelpCommand
from ..commands.exit import ExitCommand
from ..commands.purchase import PurchaseCommand
class CommandState:
"""
The __state value should not be accessed directly,
instead the get() method should be used.
"""
__state = {
'commands': {
'help': HelpCommand.execute,
'exit': ExitCommand.execute,
'purchase': PurchaseCommand.execute,
},
}
@classmethod
def get(cls, key):
return cls.__state.get(key) | 25.52381 | 58 | 0.617537 | [
"MIT"
] | itsSayantan/pyshop | src/state/CommandState.py | 536 | Python |
#!/usr/bin/env python
from pkg_resources import require
require('numpy')
require('h5py')
import sys, os
import numpy as np
import h5py
cxifilenames = sys.argv[2:]
output_dims = tuple()
print "Using CXI file for dims: ", cxifilenames[0]
with h5py.File(cxifilenames[0], 'r') as cxi:
output_dtype = cxi['entry_1']['data_1']['data'].dtype
output_dims = (len(cxifilenames),
cxi['entry_1']['data_3']['data'].shape[0] * 2,
cxi['entry_1']['data_3']['data'].shape[1])
print output_dims, output_dtype
dset = np.zeros(shape=output_dims, dtype = output_dtype)
for i, cxi_file in enumerate(cxifilenames):
with h5py.File(cxi_file, 'r') as cxi:
print cxi_file
cxi_dset = cxi['entry_1']['data_3']['data']
offset = (i, 0, 0)
print " ", offset, cxi_dset.shape
dset[offset[0], offset[1]:cxi_dset.shape[0]+offset[1], offset[2]:cxi_dset.shape[1]+offset[2]] = cxi_dset
cxi_dset = cxi['entry_1']['data_4']['data']
offset = (i, output_dims[1]/2, 0)
print " ", offset, cxi_dset.shape
dset[offset[0], offset[1]:cxi_dset.shape[0]+offset[1], offset[2]:cxi_dset.shape[1]+offset[2]] = cxi_dset
print "Large dataset created: ", dset
print "min/max/mean value: ", dset.min(), dset.max(), dset.mean()
print "Raising data values by (turning into unsigned dataset): ", abs(dset.min())
unsigned_dset = np.array(dset + abs(dset.min()), dtype=np.uint16)
print "Creating file: ", sys.argv[1]
out = h5py.File(sys.argv[1], 'w')
print "Creating dataset in output file"
out_dset = out.create_dataset('data', data = unsigned_dset)
print "Done. Closing file"
out.close()
| 32.666667 | 112 | 0.644658 | [
"MIT"
] | ulrikpedersen/benchpress | cxi2hdf5.py | 1,666 | Python |
#from sql_gen.sql_gen.filters import *
class Prompter(object):
def __init__(self, template_source):
self.template_source = template_source
def get_prompts(self):
result=[]
for undeclared_var in self.template_source.find_undeclared_variables():
result.append(Prompt(undeclared_var,self.template_source.get_filters(undeclared_var)))
return result
def build_context(self):
prompts = self.get_prompts()
context ={}
for prompt in prompts:
prompt.populate_value(context)
return context
class Prompt:
def __init__(self, variable_name, filter_list):
self.variable_name =variable_name
self.filter_list = filter_list
def get_diplay_text(self):
self.display_text = self.variable_name
for template_filter in self.filter_list:
            self.display_text = template_filter.apply(self.display_text)
return self.display_text+": "
def populate_value(self,context):
        var = raw_input(self.get_diplay_text())
if var:
context[self.variable_name] = var
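# A hypothetical sketch of the collaborator this class expects; the real
# template_source in sql_gen may expose a different interface:
#
#   class FakeTemplateSource(object):
#       def find_undeclared_variables(self):
#           return ['table_name']
#       def get_filters(self, variable_name):
#           return []  # no display filters for this variable
#
#   prompter = Prompter(FakeTemplateSource())
#   context = prompter.build_context()  # prompts "table_name: " on stdin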
| 30.297297 | 98 | 0.67529 | [
"MIT"
] | vecin2/em-dev-tools | build/lib.linux-x86_64-2.7/sql_gen/sql_gen/prompter.py | 1,121 | Python |
import logging
import os
import select
import socket
from typing import Union, List
log = logging.getLogger(__name__)
class Receiver:
def __init__(self, irc_socket: socket.socket, socket_timeout: int) -> None:
self._irc_socket = irc_socket
self._socket_timeout = socket_timeout
try:
self._tc = int(os.popen('stty size', 'r').read().split()[1])
except IndexError:
log.warning("term columns could not be ascertained")
self._tc = 80
@property
def irc_socket(self) -> socket.socket:
return self._irc_socket
@irc_socket.setter
def irc_socket(self, new_socket: socket.socket) -> None:
self._irc_socket = new_socket
def receive_msg(self) -> Union[str, List[str]]:
# Timeout when connection is lost
self._irc_socket.setblocking(False)
ready = select.select([self._irc_socket], [], [], self._socket_timeout)
ircmsg = ""
if ready[0]:
try:
ircmsg = self._irc_socket.recv(2048).decode("UTF-8")
except (OSError, UnicodeDecodeError) as e:
log.error(e)
return "ERROR"
ircmsgs = ircmsg.split('\r\n')
if len(ircmsgs) > 1 and not ircmsgs[len(ircmsgs) - 1]:
del ircmsgs[len(ircmsgs) - 1]
sepmsg = "ircmsg:"
for ircmsg in ircmsgs:
log.info("%s %s", sepmsg, "-" * (self._tc - len(sepmsg) - 30))
log.info(ircmsg)
self._irc_socket.setblocking(True)
return ircmsgs
| 31.06 | 79 | 0.594334 | [
"MIT"
] | LoLei/ircbot | src/receiver/receiver.py | 1,553 | Python |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import netaddr
from oslo_serialization import jsonutils
import six
from nova import exception
from nova.i18n import _
from nova import utils
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_DVS = 'dvs'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HW_VEB = 'hw_veb'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_HOSTDEV = 'hostdev_physical'
VIF_TYPE_IB_HOSTDEV = 'ib_hostdev'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_VHOSTUSER = 'vhostuser'
VIF_TYPE_VROUTER = 'vrouter'
VIF_TYPE_OTHER = 'other'
VIF_TYPE_TAP = 'tap'
VIF_TYPE_MACVTAP = 'macvtap'
VIF_TYPE_AGILIO_OVS = 'agilio_ovs'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_VIF = 'vif'
# Constants for dictionary keys in the 'vif_details' field in the VIF
# class
VIF_DETAILS_PORT_FILTER = 'port_filter'
VIF_DETAILS_OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network'
VIF_DETAILS_BRIDGE_NAME = 'bridge_name'
VIF_DETAILS_OVS_DATAPATH_TYPE = 'datapath_type'
# The following constant defines an SR-IOV related parameter in the
# 'vif_details'. 'profileid' should be used for VIF_TYPE_802_QBH
VIF_DETAILS_PROFILEID = 'profileid'
# The following constant defines an SR-IOV and macvtap related parameter in
# the 'vif_details'. 'vlan' should be used for VIF_TYPE_HW_VEB or
# VIF_TYPE_MACVTAP
VIF_DETAILS_VLAN = 'vlan'
# The following three constants define the macvtap related fields in
# the 'vif_details'.
VIF_DETAILS_MACVTAP_SOURCE = 'macvtap_source'
VIF_DETAILS_MACVTAP_MODE = 'macvtap_mode'
VIF_DETAILS_PHYS_INTERFACE = 'physical_interface'
# Constants for vhost-user related fields in 'vif_details'.
# Sets mode on vhost-user socket, valid values are 'client'
# and 'server'
VIF_DETAILS_VHOSTUSER_MODE = 'vhostuser_mode'
# vhost-user socket path
VIF_DETAILS_VHOSTUSER_SOCKET = 'vhostuser_socket'
# Specifies whether vhost-user socket should be plugged
# into ovs bridge. Valid values are True and False
VIF_DETAILS_VHOSTUSER_OVS_PLUG = 'vhostuser_ovs_plug'
# Specifies whether vhost-user socket should be used to
# create a fp netdevice interface.
VIF_DETAILS_VHOSTUSER_FP_PLUG = 'vhostuser_fp_plug'
# Specifies whether vhost-user socket should be used to
# create a vrouter netdevice interface
# TODO(mhenkel): Consider renaming this to be contrail-specific.
VIF_DETAILS_VHOSTUSER_VROUTER_PLUG = 'vhostuser_vrouter_plug'
# Constants for dictionary keys in the 'vif_details' field that are
# valid for VIF_TYPE_TAP.
VIF_DETAILS_TAP_MAC_ADDRESS = 'mac_address'
# Open vSwitch datapath types.
VIF_DETAILS_OVS_DATAPATH_SYSTEM = 'system'
VIF_DETAILS_OVS_DATAPATH_NETDEV = 'netdev'
# Define supported virtual NIC types. VNIC_TYPE_DIRECT and VNIC_TYPE_MACVTAP
# are used for SR-IOV ports
VNIC_TYPE_NORMAL = 'normal'
VNIC_TYPE_DIRECT = 'direct'
VNIC_TYPE_MACVTAP = 'macvtap'
VNIC_TYPE_DIRECT_PHYSICAL = 'direct-physical'
VNIC_TYPE_BAREMETAL = 'baremetal'
VNIC_TYPE_VIRTIO_FORWARDER = 'virtio-forwarder'
# Define list of ports which needs pci request.
# Note: The macvtap port needs a PCI request as it is a tap interface
# with VF as the lower physical interface.
# Note: Currently, VNIC_TYPE_VIRTIO_FORWARDER assumes a 1:1
# relationship with a VF. This is expected to change in the future.
VNIC_TYPES_SRIOV = (VNIC_TYPE_DIRECT, VNIC_TYPE_MACVTAP,
VNIC_TYPE_DIRECT_PHYSICAL, VNIC_TYPE_VIRTIO_FORWARDER)
# Define list of ports which are passthrough to the guest
# and need a special treatment on snapshot and suspend/resume
VNIC_TYPES_DIRECT_PASSTHROUGH = (VNIC_TYPE_DIRECT,
VNIC_TYPE_DIRECT_PHYSICAL)
# Constants for the 'vif_model' values
VIF_MODEL_VIRTIO = 'virtio'
VIF_MODEL_NE2K_PCI = 'ne2k_pci'
VIF_MODEL_PCNET = 'pcnet'
VIF_MODEL_RTL8139 = 'rtl8139'
VIF_MODEL_E1000 = 'e1000'
VIF_MODEL_E1000E = 'e1000e'
VIF_MODEL_NETFRONT = 'netfront'
VIF_MODEL_SPAPR_VLAN = 'spapr-vlan'
VIF_MODEL_LAN9118 = 'lan9118'
VIF_MODEL_SRIOV = 'sriov'
VIF_MODEL_VMXNET = 'vmxnet'
VIF_MODEL_VMXNET3 = 'vmxnet3'
VIF_MODEL_ALL = (
VIF_MODEL_VIRTIO,
VIF_MODEL_NE2K_PCI,
VIF_MODEL_PCNET,
VIF_MODEL_RTL8139,
VIF_MODEL_E1000,
VIF_MODEL_E1000E,
VIF_MODEL_NETFRONT,
VIF_MODEL_SPAPR_VLAN,
VIF_MODEL_LAN9118,
VIF_MODEL_SRIOV,
VIF_MODEL_VMXNET,
VIF_MODEL_VMXNET3,
)
# these types have been leaked to guests in network_data.json
LEGACY_EXPOSED_VIF_TYPES = (
VIF_TYPE_BRIDGE,
VIF_TYPE_DVS,
VIF_TYPE_HW_VEB,
VIF_TYPE_HYPERV,
VIF_TYPE_OVS,
VIF_TYPE_TAP,
VIF_TYPE_VHOSTUSER,
VIF_TYPE_VIF,
)
# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
# the VIF class
NIC_NAME_LEN = 14
class Model(dict):
"""Defines some necessary structures for most of the network models."""
def __repr__(self):
return jsonutils.dumps(self)
def _set_meta(self, kwargs):
# pull meta out of kwargs if it's there
self['meta'] = kwargs.pop('meta', {})
# update meta with any additional kwargs that may exist
self['meta'].update(kwargs)
def get_meta(self, key, default=None):
"""calls get(key, default) on self['meta']."""
return self['meta'].get(key, default)
class IP(Model):
"""Represents an IP address in Nova."""
def __init__(self, address=None, type=None, **kwargs):
super(IP, self).__init__()
self['address'] = address
self['type'] = type
self['version'] = kwargs.pop('version', None)
self._set_meta(kwargs)
# determine version from address if not passed in
if self['address'] and not self['version']:
try:
self['version'] = netaddr.IPAddress(self['address']).version
except netaddr.AddrFormatError:
msg = _("Invalid IP format %s") % self['address']
raise exception.InvalidIpAddressError(msg)
def __eq__(self, other):
keys = ['address', 'type', 'version']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
def is_in_subnet(self, subnet):
if self['address'] and subnet['cidr']:
return (netaddr.IPAddress(self['address']) in
netaddr.IPNetwork(subnet['cidr']))
else:
return False
@classmethod
def hydrate(cls, ip):
if ip:
return cls(**ip)
return None
class FixedIP(IP):
"""Represents a Fixed IP address in Nova."""
def __init__(self, floating_ips=None, **kwargs):
super(FixedIP, self).__init__(**kwargs)
self['floating_ips'] = floating_ips or []
if not self['type']:
self['type'] = 'fixed'
def add_floating_ip(self, floating_ip):
if floating_ip not in self['floating_ips']:
self['floating_ips'].append(floating_ip)
def floating_ip_addresses(self):
return [ip['address'] for ip in self['floating_ips']]
@staticmethod
def hydrate(fixed_ip):
fixed_ip = FixedIP(**fixed_ip)
fixed_ip['floating_ips'] = [IP.hydrate(floating_ip)
for floating_ip in fixed_ip['floating_ips']]
return fixed_ip
def __eq__(self, other):
keys = ['address', 'type', 'version', 'floating_ips']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
class Route(Model):
"""Represents an IP Route in Nova."""
def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
super(Route, self).__init__()
self['cidr'] = cidr
self['gateway'] = gateway
# FIXME(mriedem): Is this actually used? It's never set.
self['interface'] = interface
self._set_meta(kwargs)
@classmethod
def hydrate(cls, route):
route = cls(**route)
route['gateway'] = IP.hydrate(route['gateway'])
return route
class Subnet(Model):
"""Represents a Subnet in Nova."""
def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
routes=None, **kwargs):
super(Subnet, self).__init__()
self['cidr'] = cidr
self['dns'] = dns or []
self['gateway'] = gateway
self['ips'] = ips or []
self['routes'] = routes or []
self['version'] = kwargs.pop('version', None)
self._set_meta(kwargs)
if self['cidr'] and not self['version']:
self['version'] = netaddr.IPNetwork(self['cidr']).version
def __eq__(self, other):
keys = ['cidr', 'dns', 'gateway', 'ips', 'routes', 'version']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
def add_route(self, new_route):
if new_route not in self['routes']:
self['routes'].append(new_route)
def add_dns(self, dns):
if dns not in self['dns']:
self['dns'].append(dns)
def add_ip(self, ip):
if ip not in self['ips']:
self['ips'].append(ip)
def as_netaddr(self):
"""Convenient function to get cidr as a netaddr object."""
return netaddr.IPNetwork(self['cidr'])
@classmethod
def hydrate(cls, subnet):
subnet = cls(**subnet)
subnet['dns'] = [IP.hydrate(dns) for dns in subnet['dns']]
subnet['ips'] = [FixedIP.hydrate(ip) for ip in subnet['ips']]
subnet['routes'] = [Route.hydrate(route) for route in subnet['routes']]
subnet['gateway'] = IP.hydrate(subnet['gateway'])
return subnet
class Network(Model):
"""Represents a Network in Nova."""
def __init__(self, id=None, bridge=None, label=None,
subnets=None, **kwargs):
super(Network, self).__init__()
self['id'] = id
self['bridge'] = bridge
self['label'] = label
self['subnets'] = subnets or []
self._set_meta(kwargs)
def add_subnet(self, subnet):
if subnet not in self['subnets']:
self['subnets'].append(subnet)
@classmethod
def hydrate(cls, network):
if network:
network = cls(**network)
network['subnets'] = [Subnet.hydrate(subnet)
for subnet in network['subnets']]
return network
def __eq__(self, other):
keys = ['id', 'bridge', 'label', 'subnets']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
class VIF8021QbgParams(Model):
"""Represents the parameters for a 802.1qbg VIF."""
def __init__(self, managerid, typeid, typeidversion, instanceid):
super(VIF8021QbgParams, self).__init__()
self['managerid'] = managerid
self['typeid'] = typeid
self['typeidversion'] = typeidversion
self['instanceid'] = instanceid
class VIF8021QbhParams(Model):
"""Represents the parameters for a 802.1qbh VIF."""
def __init__(self, profileid):
super(VIF8021QbhParams, self).__init__()
self['profileid'] = profileid
class VIF(Model):
"""Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
details=None, devname=None, ovs_interfaceid=None,
qbh_params=None, qbg_params=None, active=False,
vnic_type=VNIC_TYPE_NORMAL, profile=None,
preserve_on_delete=False, **kwargs):
super(VIF, self).__init__()
self['id'] = id
self['address'] = address
self['network'] = network or None
self['type'] = type
self['details'] = details or {}
self['devname'] = devname
self['ovs_interfaceid'] = ovs_interfaceid
self['qbh_params'] = qbh_params
self['qbg_params'] = qbg_params
self['active'] = active
self['vnic_type'] = vnic_type
self['profile'] = profile
self['preserve_on_delete'] = preserve_on_delete
self._set_meta(kwargs)
def __eq__(self, other):
keys = ['id', 'address', 'network', 'vnic_type',
'type', 'profile', 'details', 'devname',
'ovs_interfaceid', 'qbh_params', 'qbg_params',
'active', 'preserve_on_delete']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
return not self.__eq__(other)
def fixed_ips(self):
if self['network']:
return [fixed_ip for subnet in self['network']['subnets']
for fixed_ip in subnet['ips']]
else:
return []
def floating_ips(self):
return [floating_ip for fixed_ip in self.fixed_ips()
for floating_ip in fixed_ip['floating_ips']]
def labeled_ips(self):
"""Returns the list of all IPs
The return value looks like this flat structure::
{'network_label': 'my_network',
'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
'ips': [{'address': '123.123.123.123',
'version': 4,
                     'type': 'fixed',
'meta': {...}},
{'address': '124.124.124.124',
'version': 4,
'type': 'floating',
'meta': {...}},
{'address': 'fe80::4',
'version': 6,
'type': 'fixed',
'meta': {...}}]
"""
if self['network']:
# remove unnecessary fields on fixed_ips
ips = [IP(**ip) for ip in self.fixed_ips()]
for ip in ips:
# remove floating ips from IP, since this is a flat structure
# of all IPs
del ip['meta']['floating_ips']
# add floating ips to list (if any)
ips.extend(self.floating_ips())
return {'network_label': self['network']['label'],
'network_id': self['network']['id'],
'ips': ips}
return []
def is_hybrid_plug_enabled(self):
return self['details'].get(VIF_DETAILS_OVS_HYBRID_PLUG, False)
def is_neutron_filtering_enabled(self):
return self['details'].get(VIF_DETAILS_PORT_FILTER, False)
def get_physical_network(self):
phy_network = self['network']['meta'].get('physical_network')
if not phy_network:
phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK)
return phy_network
@classmethod
def hydrate(cls, vif):
vif = cls(**vif)
vif['network'] = Network.hydrate(vif['network'])
return vif
def get_netmask(ip, subnet):
"""Returns the netmask appropriate for injection into a guest."""
if ip['version'] == 4:
return str(subnet.as_netaddr().netmask)
return subnet.as_netaddr()._prefixlen
class NetworkInfo(list):
"""Stores and manipulates network information for a Nova instance."""
# NetworkInfo is a list of VIFs
def fixed_ips(self):
"""Returns all fixed_ips without floating_ips attached."""
return [ip for vif in self for ip in vif.fixed_ips()]
def floating_ips(self):
"""Returns all floating_ips."""
return [ip for vif in self for ip in vif.floating_ips()]
@classmethod
def hydrate(cls, network_info):
if isinstance(network_info, six.string_types):
network_info = jsonutils.loads(network_info)
return cls([VIF.hydrate(vif) for vif in network_info])
def wait(self, do_raise=True):
"""Wait for asynchronous call to finish."""
# There is no asynchronous call for this class, so this is a no-op
# here, but subclasses may override to provide asynchronous
# capabilities. Must be defined here in the parent class so that code
# which works with both parent and subclass types can reference this
# method.
pass
def json(self):
return jsonutils.dumps(self)
class NetworkInfoAsyncWrapper(NetworkInfo):
"""Wrapper around NetworkInfo that allows retrieving NetworkInfo
in an async manner.
This allows one to start querying for network information before
you know you will need it. If you have a long-running
operation, this allows the network model retrieval to occur in the
background. When you need the data, it will ensure the async
operation has completed.
As an example:
        def allocate_net_info(arg1, arg2):
return call_neutron_to_allocate(arg1, arg2)
network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2)
[do a long running operation -- real network_info will be retrieved
in the background]
[do something with network_info]
"""
def __init__(self, async_method, *args, **kwargs):
super(NetworkInfoAsyncWrapper, self).__init__()
self._gt = utils.spawn(async_method, *args, **kwargs)
methods = ['json', 'fixed_ips', 'floating_ips']
for method in methods:
fn = getattr(self, method)
wrapper = functools.partial(self._sync_wrapper, fn)
functools.update_wrapper(wrapper, fn)
setattr(self, method, wrapper)
def _sync_wrapper(self, wrapped, *args, **kwargs):
"""Synchronize the model before running a method."""
self.wait()
return wrapped(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__getitem__
return self._sync_wrapper(fn, *args, **kwargs)
def __iter__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__iter__
return self._sync_wrapper(fn, *args, **kwargs)
def __len__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__len__
return self._sync_wrapper(fn, *args, **kwargs)
def __str__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__str__
return self._sync_wrapper(fn, *args, **kwargs)
def __repr__(self, *args, **kwargs):
fn = super(NetworkInfoAsyncWrapper, self).__repr__
return self._sync_wrapper(fn, *args, **kwargs)
def wait(self, do_raise=True):
"""Wait for asynchronous call to finish."""
if self._gt is not None:
try:
# NOTE(comstud): This looks funky, but this object is
# subclassed from list. In other words, 'self' is really
# just a list with a bunch of extra methods. So this
# line just replaces the current list (which should be
# empty) with the result.
self[:] = self._gt.wait()
except Exception:
if do_raise:
raise
finally:
self._gt = None
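# Illustrative construction of the model objects defined above (addresses and
# ids are made up for the example):
#
#   subnet = Subnet(cidr='192.0.2.0/24',
#                   gateway=IP(address='192.0.2.1', type='gateway'),
#                   ips=[FixedIP(address='192.0.2.10')])
#   network = Network(id='net-1', bridge='br100', label='demo',
#                     subnets=[subnet])
#   vif = VIF(id='vif-1', address='fa:16:3e:00:00:01', network=network)
#
#   vif.fixed_ips()                          # -> [the FixedIP above]
#   get_netmask(vif.fixed_ips()[0], subnet)  # -> '255.255.255.0'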
| 33.348048 | 79 | 0.639497 | [
"Apache-2.0"
] | Alex-Sizov/nova | nova/network/model.py | 19,642 | Python |
import bs4
import requests
import operator
res = requests.get("http://dollarrupee.in/")
soup = bs4.BeautifulSoup(res.text, "lxml")
rate = soup.select(".item-page p strong")
rupee = float(rate[0].text)
print("Today's rate: $1 = ₹{}".format(rupee))
choice = int(input(
"\nWhat do you want to convert?\n1. Dollars to Rupees\n2. Rupees to Dollars\n"))
if(choice == 1):
amount = int(input("Enter amount in USD:\n"))
print("Today's conversion: ${} = ₹{} (approx.)".format(
amount, round(amount * rupee)))
elif(choice == 2):
amount = int(input("Enter amount in INR:\n"))
print("Today's conversion: ₹{} = ${} (approx.)".format(
amount, round(amount / rupee)))
else:
print("Bad choice!") | 26.851852 | 84 | 0.628966 | [
"MIT"
] | urmilshroff/netscraper | rupee.py | 731 | Python |
# -*- coding: utf-8 -*-
"""
"""
from ill import api
tn = api.request_document('8236596')
print(tn)
#api.download_papers()
#NOT YET IMPLEMENTED
#Not downloaded
#api.delete_online_papers(api.downloaded_paper_ids)
#main.fill_form('610035')
print('Done with the request') | 16.111111 | 52 | 0.67931 | [
"MIT"
] | ScholarTools/ill_filler | ill_filler_quick_testing.py | 290 | Python |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.event import fire_event
from flexget.manager import Session
from flexget.plugins.modify.variables import Variables
@pytest.mark.usefixtures('tmpdir')
class TestVariablesFromFile(object):
config = """
variables: __tmp__/variables.yml
tasks:
test_variable_from_file:
mock:
- { title: 'test', location: 'http://mock'}
if:
- '{? test_variable ?}': accept
test_variables_alongside_jinja:
mock:
- title: title 1
entry_var: foo
set:
a_field: first {?bar_var?} then {{entry_var|default("shouldn't happen")}} {{fake_field|default("end")}}
accept_all: yes
"""
@pytest.mark.filecopy('variables.yml', '__tmp__/variables.yml')
def test_variable_from_file(self, execute_task, manager):
task = execute_task('test_variable_from_file')
assert len(task.accepted) == 1
@pytest.mark.filecopy('variables.yml', '__tmp__/variables.yml')
def test_variables_alongside_jinja(self, execute_task):
task = execute_task('test_variables_alongside_jinja')
assert len(task.accepted) == 1
assert task.accepted[0]['a_field'] == 'first bar then foo end'
class TestVariablesFromConfig():
config = """
variables:
mock_entry_list:
- title: a
- title: b
integer: 2
tasks:
test_int_var:
mock:
- title: a
- title: b
- title: c
accept_all: yes
limit_new: "{? integer ?}"
test_var_mock:
mock: "{? mock_entry_list ?}"
"""
def test_complex_var(self, execute_task):
task = execute_task('test_var_mock')
assert len(task.all_entries) == 2
assert task.all_entries[1]['title'] == 'b'
def test_int_var(self, execute_task):
task = execute_task('test_int_var')
assert len(task.all_entries) == 3
assert len(task.accepted) == 2
class TestVariablesFromDB(object):
config = """
variables: yes
tasks:
test_variable_from_db:
mock:
- { title: 'test', location: 'http://mock'}
if:
- '{? test_variable_db ?}': accept
"""
def test_variable_from_db(self, execute_task, manager):
with Session() as session:
s = Variables(variables={'test_variable_db': True})
session.add(s)
fire_event('manager.before_config_validate', manager.config, manager)
task = execute_task('test_variable_from_db')
assert len(task.accepted) == 1
| 29.925532 | 117 | 0.602204 | [
"MIT"
] | Daeymien/Flexget | flexget/tests/test_variables.py | 2,813 | Python |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 15:00:26 2018
@author: Alex
# reads and parses local html
"""
#%% Import libraries
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import codecs
import os
import re
import pickle
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
import spacy
from gensim import models
import pickle
from sklearn.feature_extraction import stop_words
#%% Read in saved html
# read in saved html back in
def read_local_html(blog_folder,blog_num):
# make filename
filename = blog_folder + 'kd_blog' + str(blog_num).zfill(4) + '.html'
# read in file
f = codecs.open(filename, 'r', 'utf-8')
# parse file
soup = BeautifulSoup(f.read(), 'html.parser')
return soup
#%%
def get_article_str(soup):
# Titles
title = soup.title.text
# Tag data
tag = soup.find_all('div', class_ = 'tag-data')
tags = tag[0].text
tags = tags.replace('Tags: ','')
# Paragraphs
paras = soup.find_all('p')
# The first paragraph always contains a description of the article
description = paras[0].text
# Get main text
main_text = ""
# remove second paragraph if it just contains author name
if "By " not in paras[1].text:
main_text = paras[1].text
for i in range(2,len(paras)):
# These if statements remove later paragraphs if they don't contain the main text of the article
if i > len(paras)-5 and "Bio" in paras[i].text:
continue
elif i > len(paras)-5 and "Original" in paras[i].text:
continue
elif i > len(paras)-5 and "Related" in paras[i].text:
continue
elif i > len(paras)-5 and "disqus" in paras[i].text:
continue
elif i > len(paras)-5 and "Pages" in paras[i].text:
continue
else:
main_text = main_text + ' ' + paras[i].text
# Create an article string
article_str = title + '. ' + tags + '. ' + description + ' ' + main_text
return article_str
#%%
def clean_article(article_str):
# lowercase
article_str = article_str.lower()
#Remove any non alphanumeric characters that are no end-of-sentence punctuation
article_str = re.sub('[^a-z\s\.\?\!]+','', article_str)
# Replace ? with .
article_str = re.sub('\?','.', article_str)
# Replace ! with .
article_str = re.sub('\!','.', article_str)
# Replace more than one whitespace with one whitespace
article_str = re.sub('\s+',' ', article_str)
# Remove trailing whitespace
article_str = re.sub("\s+(?!\S)", "",article_str)
# Remove preceding whitespace
article_str = re.sub("(?<!\S)\s+", "",article_str)
# Replace funny words from lemmatization
article_str = re.sub("datum","data",article_str)
article_str = re.sub("learn\s","learning",article_str)
article_str = re.sub("miss","missing",article_str)
return article_str
#%% Split each blog post into sentences
def get_sentences(article_str):
# lowercase
article_str = article_str.lower()
#Remove any non alphanumeric characters
article_str = re.sub('[^a-z\s\.]+','', article_str)
article_str = re.sub('\s+',' ', article_str)
# Split doc into sentences
sent_text = nltk.sent_tokenize(article_str)
# Split sentences into words
tokenized_sentences = []
for sentence in sent_text:
# remove periods
sentence = re.sub('\.','', sentence)
# tokenize
tokenized_sentences.append(nltk.word_tokenize(sentence))
return tokenized_sentences
#%%
def lemmatize(cleaned_article):
nlp = spacy.load('en', disable=['parser', 'ner'])
    doc = nlp(cleaned_article)
lemma_article = " ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc])
cleaned_lemma = clean_article(lemma_article)
return cleaned_lemma
#%% Extract phrases from all the documents
def phrase_extractor(doc_sents):
'''
doc_sents is a list where each element is a list with elements corresponding to individual sentences of a document
'''
# rename some functions
Phraser = models.phrases.Phraser
Phrases = models.phrases.Phrases
# Generate list of sentences
sentence_stream = sum(doc_sents, [])
# Generate bigrams
common_terms = ["of", "with", "without", "and", "or", "the", "a", "as"]
phrases = Phrases(sentence_stream, common_terms=common_terms)
bigram = Phraser(phrases)
# Generate trigrams
trigram = Phrases(bigram[sentence_stream])
# Generate output
output_strs = []
for idx in range(0,len(doc_sents)):
doc = doc_sents[idx]
output_doc = list(trigram[doc])
output_str = sum(output_doc,[])
output_strs.append(' '.join(output_str))
return output_strs
#%% Loop through all the blog posts
blog_folder = 'C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\data\\raw\\kd_blogs\\'
os.chdir(blog_folder)
num_blog_posts = len(os.listdir(blog_folder))
documents = []
num_skipped = 0
blogs_included = []
doc_sents = []
for blog_num in range(1,num_blog_posts+1):
try:
# Parse html
soup = read_local_html(blog_folder,blog_num)
article_str = get_article_str(soup)
cleaned_article = clean_article(article_str)
lemma_article = lemmatize(cleaned_article)
# Extract sentences for phrase extraction
tokenized_sentences = get_sentences(lemma_article)
doc_sents.append(tokenized_sentences)
# Meta data
blogs_included.append(blog_num)
    except Exception as e:
        print('Blog ' + str(blog_num) + ' skipped: ' + str(e))
num_skipped += 1
documents = phrase_extractor(doc_sents)
#documents.append(cleaned_article)
# Save documents
processed_data_folder = 'C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\data\\processed\\'
filename = processed_data_folder + 'kd_docs'
with open(filename, 'wb') as fp:
pickle.dump((documents,blogs_included), fp)
'''
filename = processed_data_folder + 'doc_sents'
with open(filename, 'wb') as fp:
pickle.dump(doc_sents, fp)
''' | 28.324324 | 118 | 0.639472 | [
"MIT"
] | avbatchelor/insight-articles-project | src/scraping/read_and_parse.py | 6,288 | Python |
"""
.. module: lemur.domains.models
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from sqlalchemy import Column, Integer, String, Boolean, Index
from lemur.database import db
class Domain(db.Model):
__tablename__ = "domains"
__table_args__ = (
Index(
"ix_domains_name_gin",
"name",
postgresql_ops={"name": "gin_trgm_ops"},
postgresql_using="gin",
),
)
id = Column(Integer, primary_key=True)
name = Column(String(256), index=True)
sensitive = Column(Boolean, default=False)
def __repr__(self):
return "Domain(name={name})".format(name=self.name)
| 25.774194 | 62 | 0.635795 | [
"Apache-2.0"
] | DZ521111/lemur | lemur/domains/models.py | 799 | Python |
import cv2
import os
import numpy as np
from image_processor import process_image
from processor_properties import ProcessorProperties
import time
class Camera:
def __init__(self):
self.cap = cv2.VideoCapture(0)
def snapshot(self):
ret, frame = self.cap.read()
return frame
if __name__ == '__main__':
camera = Camera()
while True:
frame = camera.snapshot()
props = ProcessorProperties()
# props.brightness_factor.update(1.5)
# props.contrast_factor.update(1.5)
# props.scaling_factor.update(3.0)
frame = process_image(frame, props)
cv2.imshow('image', frame)
k = cv2.waitKey(1) & 0xFF
if k == ord('q'):
break
elif k == ord('s'):
timestr = time.strftime("%Y%m%d-%H%M%S")
image_path = os.path.join("testimgs", "%s.jpg" % timestr)
cv2.imwrite(image_path, frame)
print "save %s" % image_path | 27.027778 | 69 | 0.599178 | [
"MIT"
] | korrawat/athack-susan | camera_skeleton.py | 973 | Python |
# encoding: utf-8
"""Placeholder-related objects.
Specific to shapes having a `p:ph` element. A placeholder has distinct behaviors
depending on whether it appears on a slide, layout, or master. Hence there is a
non-trivial class inheritance structure.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.shapes.autoshape import Shape
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.picture import Picture
from pptx.util import Emu
class _InheritsDimensions(object):
"""
Mixin class that provides inherited dimension behavior. Specifically,
left, top, width, and height report the value from the layout placeholder
where they would have otherwise reported |None|. This behavior is
distinctive to placeholders. :meth:`_base_placeholder` must be overridden
by all subclasses to provide lookup of the appropriate base placeholder
to inherit from.
"""
@property
def height(self):
"""
The effective height of this placeholder shape; its directly-applied
height if it has one, otherwise the height of its parent layout
placeholder.
"""
return self._effective_value("height")
@height.setter
def height(self, value):
self._element.cy = value
@property
def left(self):
"""
The effective left of this placeholder shape; its directly-applied
left if it has one, otherwise the left of its parent layout
placeholder.
"""
return self._effective_value("left")
@left.setter
def left(self, value):
self._element.x = value
@property
def shape_type(self):
"""
Member of :ref:`MsoShapeType` specifying the type of this shape.
Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
Read-only.
"""
return MSO_SHAPE_TYPE.PLACEHOLDER
@property
def top(self):
"""
The effective top of this placeholder shape; its directly-applied
top if it has one, otherwise the top of its parent layout
placeholder.
"""
return self._effective_value("top")
@top.setter
def top(self, value):
self._element.y = value
@property
def width(self):
"""
The effective width of this placeholder shape; its directly-applied
width if it has one, otherwise the width of its parent layout
placeholder.
"""
return self._effective_value("width")
@width.setter
def width(self, value):
self._element.cx = value
@property
def _base_placeholder(self):
"""
Return the layout or master placeholder shape this placeholder
inherits from. Not to be confused with an instance of
|BasePlaceholder| (necessarily).
"""
raise NotImplementedError("Must be implemented by all subclasses.")
def _effective_value(self, attr_name):
"""
The effective value of *attr_name* on this placeholder shape; its
directly-applied value if it has one, otherwise the value on the
layout placeholder it inherits from.
"""
directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name)
if directly_applied_value is not None:
return directly_applied_value
return self._inherited_value(attr_name)
def _inherited_value(self, attr_name):
"""
Return the attribute value, e.g. 'width' of the base placeholder this
placeholder inherits from.
"""
base_placeholder = self._base_placeholder
if base_placeholder is None:
return None
inherited_value = getattr(base_placeholder, attr_name)
return inherited_value
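# Illustrative sketch of the inheritance behavior described above (not executed here;
# the file name and placeholder index are hypothetical):
#
#     from pptx import Presentation
#     prs = Presentation("deck.pptx")
#     ph = prs.slides[0].placeholders[0]
#     ph.width  # directly-applied width if set on this shape, otherwise the width
#               # inherited from the matching layout (or master) placeholder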
class _BaseSlidePlaceholder(_InheritsDimensions, Shape):
"""Base class for placeholders on slides.
Provides common behaviors such as inherited dimensions.
"""
@property
def is_placeholder(self):
"""
Boolean indicating whether this shape is a placeholder.
Unconditionally |True| in this case.
"""
return True
@property
def shape_type(self):
"""
Member of :ref:`MsoShapeType` specifying the type of this shape.
Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
Read-only.
"""
return MSO_SHAPE_TYPE.PLACEHOLDER
@property
def _base_placeholder(self):
"""
Return the layout placeholder this slide placeholder inherits from.
Not to be confused with an instance of |BasePlaceholder|
(necessarily).
"""
layout, idx = self.part.slide_layout, self._element.ph_idx
return layout.placeholders.get(idx=idx)
def _replace_placeholder_with(self, element):
"""
Substitute *element* for this placeholder element in the shapetree.
This placeholder's `._element` attribute is set to |None| and its
original element is free for garbage collection. Any attribute access
(including a method call) on this placeholder after this call raises
|AttributeError|.
"""
element._nvXxPr.nvPr._insert_ph(self._element.ph)
self._element.addprevious(element)
self._element.getparent().remove(self._element)
self._element = None
class BasePlaceholder(Shape):
"""
NOTE: This class is deprecated and will be removed from a future release
along with the properties *idx*, *orient*, *ph_type*, and *sz*. The *idx*
property will be available via the .placeholder_format property. The
others will be accessed directly from the oxml layer as they are only
used for internal purposes.
Base class for placeholder subclasses that differentiate the varying
behaviors of placeholders on a master, layout, and slide.
"""
@property
def idx(self):
"""
Integer placeholder 'idx' attribute, e.g. 0
"""
return self._sp.ph_idx
@property
def orient(self):
"""
Placeholder orientation, e.g. ST_Direction.HORZ
"""
return self._sp.ph_orient
@property
def ph_type(self):
"""
Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
"""
return self._sp.ph_type
@property
def sz(self):
"""
Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
"""
return self._sp.ph_sz
class LayoutPlaceholder(_InheritsDimensions, Shape):
"""
Placeholder shape on a slide layout, providing differentiated behavior
for slide layout placeholders, in particular, inheriting shape properties
from the master placeholder having the same type, when a matching one
exists.
"""
@property
def _base_placeholder(self):
"""
Return the master placeholder this layout placeholder inherits from.
"""
base_ph_type = {
PP_PLACEHOLDER.BODY: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.CHART: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.BITMAP: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.CENTER_TITLE: PP_PLACEHOLDER.TITLE,
PP_PLACEHOLDER.ORG_CHART: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.DATE: PP_PLACEHOLDER.DATE,
PP_PLACEHOLDER.FOOTER: PP_PLACEHOLDER.FOOTER,
PP_PLACEHOLDER.MEDIA_CLIP: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.OBJECT: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.PICTURE: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.SLIDE_NUMBER: PP_PLACEHOLDER.SLIDE_NUMBER,
PP_PLACEHOLDER.SUBTITLE: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.TABLE: PP_PLACEHOLDER.BODY,
PP_PLACEHOLDER.TITLE: PP_PLACEHOLDER.TITLE,
}[self._element.ph_type]
slide_master = self.part.slide_master
return slide_master.placeholders.get(base_ph_type, None)
class MasterPlaceholder(BasePlaceholder):
"""
Placeholder shape on a slide master.
"""
class NotesSlidePlaceholder(_InheritsDimensions, Shape):
"""
Placeholder shape on a notes slide. Inherits shape properties from the
placeholder on the notes master that has the same type (e.g. 'body').
"""
@property
def _base_placeholder(self):
"""
Return the notes master placeholder this notes slide placeholder
inherits from, or |None| if no placeholder of the matching type is
present.
"""
notes_master = self.part.notes_master
ph_type = self.element.ph_type
return notes_master.placeholders.get(ph_type=ph_type)
class SlidePlaceholder(_BaseSlidePlaceholder):
"""
Placeholder shape on a slide. Inherits shape properties from its
corresponding slide layout placeholder.
"""
class ChartPlaceholder(_BaseSlidePlaceholder):
"""Placeholder shape that can only accept a chart."""
def insert_chart(self, chart_type, chart_data):
"""
Return a |PlaceholderGraphicFrame| object containing a new chart of
*chart_type* depicting *chart_data* and having the same position and
size as this placeholder. *chart_type* is one of the
:ref:`XlChartType` enumeration values. *chart_data* is a |ChartData|
object populated with the categories and series values for the chart.
Note that the new |Chart| object is not returned directly. The chart
object may be accessed using the
:attr:`~.PlaceholderGraphicFrame.chart` property of the returned
|PlaceholderGraphicFrame| object.
"""
rId = self.part.add_chart_part(chart_type, chart_data)
graphicFrame = self._new_chart_graphicFrame(
rId, self.left, self.top, self.width, self.height
)
self._replace_placeholder_with(graphicFrame)
return PlaceholderGraphicFrame(graphicFrame, self._parent)
def _new_chart_graphicFrame(self, rId, x, y, cx, cy):
"""
Return a newly created `p:graphicFrame` element having the specified
position and size and containing the chart identified by *rId*.
"""
id_, name = self.shape_id, self.name
return CT_GraphicalObjectFrame.new_chart_graphicFrame(
id_, name, rId, x, y, cx, cy
)
class PicturePlaceholder(_BaseSlidePlaceholder):
"""Placeholder shape that can only accept a picture."""
def insert_picture(self, image_file):
"""Return a |PlaceholderPicture| object depicting the image in `image_file`.
`image_file` may be either a path (string) or a file-like object. The image is
cropped to fill the entire space of the placeholder. A |PlaceholderPicture|
object has all the properties and methods of a |Picture| shape except that the
value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is
`MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
"""
pic = self._new_placeholder_pic(image_file)
self._replace_placeholder_with(pic)
return PlaceholderPicture(pic, self._parent)
def _new_placeholder_pic(self, image_file):
"""
Return a new `p:pic` element depicting the image in *image_file*,
suitable for use as a placeholder. In particular this means not
having an `a:xfrm` element, allowing its extents to be inherited from
its layout placeholder.
"""
rId, desc, image_size = self._get_or_add_image(image_file)
shape_id, name = self.shape_id, self.name
pic = CT_Picture.new_ph_pic(shape_id, name, desc, rId)
pic.crop_to_fit(image_size, (self.width, self.height))
return pic
def _get_or_add_image(self, image_file):
"""
Return an (rId, description, image_size) 3-tuple identifying the
related image part containing *image_file* and describing the image.
"""
image_part, rId = self.part.get_or_add_image_part(image_file)
desc, image_size = image_part.desc, image_part._px_size
return rId, desc, image_size
class PlaceholderGraphicFrame(GraphicFrame):
"""
Placeholder shape populated with a table, chart, or smart art.
"""
@property
def is_placeholder(self):
"""
Boolean indicating whether this shape is a placeholder.
Unconditionally |True| in this case.
"""
return True
class PlaceholderPicture(_InheritsDimensions, Picture):
"""
Placeholder shape populated with a picture.
"""
@property
def _base_placeholder(self):
"""
Return the layout placeholder this picture placeholder inherits from.
"""
layout, idx = self.part.slide_layout, self._element.ph_idx
return layout.placeholders.get(idx=idx)
class TablePlaceholder(_BaseSlidePlaceholder):
"""Placeholder shape that can only accept a table."""
def insert_table(self, rows, cols):
"""Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.
The position and width of the table are those of the placeholder and its height
is proportional to the number of rows. A |PlaceholderGraphicFrame| object has
all the properties and methods of a |GraphicFrame| shape except that the value
of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally
`MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table
but rather *contains* the new table. The table can be accessed using the
:attr:`~.PlaceholderGraphicFrame.table` property of the returned
|PlaceholderGraphicFrame| object.
"""
graphicFrame = self._new_placeholder_table(rows, cols)
self._replace_placeholder_with(graphicFrame)
return PlaceholderGraphicFrame(graphicFrame, self._parent)
def _new_placeholder_table(self, rows, cols):
"""
Return a newly added `p:graphicFrame` element containing an empty
table with *rows* rows and *cols* columns, positioned at the location
of this placeholder and having its same width. The table's height is
determined by the number of rows.
"""
shape_id, name, height = self.shape_id, self.name, Emu(rows * 370840)
return CT_GraphicalObjectFrame.new_table_graphicFrame(
shape_id, name, rows, cols, self.left, self.top, self.width, height
)
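# Illustrative usage of the insert_* helpers above (hypothetical objects, not executed
# here): given slide placeholders of the matching types,
#
#     graphic_frame = table_ph.insert_table(rows=3, cols=4)
#     table = graphic_frame.table
#     picture = picture_ph.insert_picture("photo.jpg")
#
# In each case the placeholder element is replaced in the shape tree and the returned
# object still reports MSO_SHAPE_TYPE.PLACEHOLDER as its shape_type.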
| 35.987624 | 87 | 0.673499 | [
"MIT"
] | Adriyst/python-pptx | pptx/shapes/placeholder.py | 14,539 | Python |
# #########################################################################
# Copyright (c) , UChicago Argonne, LLC. All rights reserved. #
# #
# See LICENSE file. #
# #########################################################################
"""
This tool automatically eliminates aliens from CDI experiment data. It is configuration driven.
"""
import numpy as np
import sys
import os
from sklearn.cluster import DBSCAN
import tifffile as tif
from time import time
import cohere.src_py.utilities.utils as ut
__author__ = "Kenly Pelzer, Ross Harder"
__copyright__ = "Copyright (c) 2021, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['get_asymmetry',
'analyze_clusters',
'crop_center',
'save_arr',
'save_arrays',
'auto_alien1',
'filter_aliens',
'remove_blocks',
'remove_aliens']
def get_asymmetry(arr):
"""
Returns asymmetry of an array.
Parameters
----------
arr : ndarray
an array to find asymmetry
Returns
-------
ndarray
an array capturing asymmetry of original array
"""
arr_rev = arr[::-1, ::-1, ::-1]
denom = (arr + arr_rev) / 2.0
denom_nz = np.where(denom == 0, 1.0, denom)
asym = np.where(denom > 0.0, abs(arr - arr_rev) / denom_nz, 0.0)
# asym only assigned to non-zero intensity points in the passed array
return np.where(arr > 0, asym, 0)
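# Worked example (for intuition only): for a voxel of intensity 9 whose centrosymmetric
# partner has intensity 1, the denominator is (9 + 1) / 2 = 5 and the asymmetry is
# |9 - 1| / 5 = 1.6, while a perfectly symmetric pair (e.g. 4 and 4) gives 0.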
# add output of absolute cluster size.
def analyze_clusters(arr, labels, nz):
"""
Analyzes clusters and returns characteristics in arrays.
Parameters
----------
arr : ndarray
the analyzed array
labels: arr
cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1.
nz : tuple
tuple of arrays, each array containing indices of elements in arr that are non-zero along one axis.
Returns
-------
tuple
tuple containing the following arrays:
nlabels # number of labels, i.e clusters
labels_arr # array with label for each non zero point
rel_cluster_size # array with cluster size divided by max cluster size for each
# non zero point
cluster_avg # array with cluster average for each non zero point
noise_arr # array with points that are non zero but not in cluster
no_noise # array with noise poits set to 0
label_counts # tuple of two arrays: First is label number, second is number of
                             # occurrences of that label (size of cluster)
cluster_avg_asym # array with average asymmetry of a points in cluster
asymmetry # array of asymmetry with regard to entire array
cluster_size # array with cluster size for each non zero point
"""
labels_arr = np.zeros_like(arr)
noise_arr = np.zeros_like(arr)
cluster_size = np.zeros_like(arr)
cluster_avg = np.zeros_like(arr).astype(np.float32)
cluster_avg_asym = np.zeros_like(arr).astype(np.float32)
asymmetry = get_asymmetry(arr)
# label_counts is tuple of two arrays. First is label number, second is number of occurances of that label (size of cluster).
label_counts = np.unique(labels, return_counts=True)
# nz and labels are the same length. so the indicies given by nz will be set
# to their corresponding cluster number (includes noise pts).
labels_arr[nz] = labels
# this selects the nz indicies where labels=-1 (noise)
noise_pts = tuple([nz[n][labels == -1] for n in range(3)])
    # no_noise refers to the same underlying array as arr, so zeroing the noise
    # points below also removes them from arr in place (it is not an independent copy)
    no_noise = arr
    # move the points labeled noise into their own array
    noise_arr[noise_pts] = arr[noise_pts]
    # remove the noise out of arr / no_noise
    no_noise[noise_pts] = 0
nlabels = len(label_counts[0])
# print("processing labels")
# loop over the labels (clusters). label_counts[0] is the unique labels
for n in range(1, nlabels):
# print(" %i %i "%(label_counts[0][n],label_counts[1][n]), end='\r')
# the nth label from the first array of the label_counts tuple
n_lab = label_counts[0][n]
# the indicies of the points belonging to label n
cluspts = tuple([nz[d][labels == n_lab] for d in range(3)])
# the second array of the label_counts tuple is the number of points
# with that label. So put those into an array.
cluster_size[cluspts] = label_counts[1][n]
# compute the average intensity of each cluster and write into an array.
cluster_avg[cluspts] = np.sum(arr[cluspts]) / cluspts[0].size
# compute average asym of each cluster and store in array.
cluster_avg_asym[cluspts] = np.sum(asymmetry[cluspts]) / cluspts[0].size
# print(" %i %i %f %f "%(label_counts[0][n],label_counts[1][n],np.sum(asymmetry[cluspts]),cluspts[0].size), end='\n')
# print("largest clus size", cluster_size.max())
# compute relative cluster sizes to largest (main) cluster.
rel_cluster_size = cluster_size / cluster_size.max()
# return all of these arrays
return (
nlabels, labels_arr, rel_cluster_size, cluster_avg, noise_arr, no_noise, label_counts, cluster_avg_asym, asymmetry,
cluster_size)
def crop_center(arr):
"""
    Finds the max element in the array and crops the array to be symmetrical with regard to this point in each direction.
Parameters
----------
arr : ndarray
an array
Returns
-------
centered : ndarray
        an array symmetrical in all dimensions around the max element of the input array
"""
shape = arr.shape
# This tells us the point of highest intensity, which we will use as the center for inversion operations
center = np.unravel_index(np.argmax(arr, axis=None), shape)
# clip the largest possible cuboid putting the point of highest intensity at the center
principium = []
finis = []
for i in range(len(shape)):
half_shape = min(center[i], shape[i] - center[i] - 1)
principium.append(center[i] - half_shape)
finis.append(center[i] + half_shape + 1)
centered = arr[principium[0]:finis[0], principium[1]:finis[1], principium[2]:finis[2]]
return centered
def save_arr(arr, dir, fname):
"""
Saves an array in 'tif' format file.
Parameters
----------
arr : ndarray
an array to save
dir : str
directory to save the file to
fname : str
file name
Returns
-------
nothing
"""
if dir is not None:
full_name = os.path.join(dir, fname)
else:
full_name = fname # save in the current dir
tif.imsave(full_name, arr.transpose().astype(np.float32))
def save_arrays(arrs, iter, thresh, eps, dir):
"""
Saves multiple arrays in 'tif' format files. Determines file name from given parameters: iteration, threshold, and eps.
Parameters
----------
arr : tuple
a tuple of arrays to save
iter, thresh, eps : str, str, str
        parameters: iteration, threshold, and eps, used to derive the file name
dir : str
directory to save the file to
Returns
-------
nothing
"""
save_arr(arrs[1], dir, "db%d_%3.2f_labels_arr%3.2f.tif" % (iter, thresh, eps))
save_arr(arrs[2], dir, "db%d_%3.2f_rel_clustersizes%3.2f.tif" % (iter, thresh, eps))
save_arr(arrs[3], dir, "db%d_%3.2f_clusteravg%3.2f.tif" % (iter, thresh, eps))
save_arr(arrs[4], dir, "db%d_%3.2f_noise%3.2f.tif" % (iter, thresh, eps))
save_arr(arrs[5], dir, "db%d_%3.2f_no_noise%3.2f.tif" % (iter, thresh, eps))
save_arr(arrs[7], dir, "db%d_%3.2f_clusteravgasym%3.2f.tif" % (iter, thresh, eps))
save_arr(arrs[8], dir, "db%d_%3.2f_asym%3.2f.tif" % (iter, thresh, eps))
save_arr(arrs[9], dir, "db%d_%3.2f_abs_clustersizes%3.2f.tif" % (iter, thresh, eps))
def auto_alien1(data, config, data_dir=None):
"""
Removes aliens from experimental CDI data using iterative algorithm and returns the result.
    The algorithm follows these steps:
    1. Initialization:
        - initialize variables with the configuration parameters
        - crop the data array around its maximum element to the biggest symmetric size
        - set points below the threshold value to 0
        - find the non-zero elements of the data array and keep them as tuples of indices
    2. Iteration loop, which runs until the number of clusters remains unchanged:
        - run the DBSCAN algorithm on the non-zero elements and obtain cluster labels
        - analyze the results to find relative cluster sizes, average cluster asymmetry, and other characteristics
        - remove alien clusters, i.e. the ones with relative cluster size below the configured size threshold and with average asymmetry over the configured asymmetry threshold
        - go back to the top of the loop, feeding the non-zero elements of the alien-removed array to DBSCAN
    3. If configured, a final step applies a gaussian convolution to the result and uses it as a filter with the configured sigma as threshold
Parameters
----------
data : ndarray
an array with experiment data
config : Object
configuration object providing access to configuration parameters
data_dir : str
a directory where 'alien_analysis' subdirectory will be created to save results of analysis if configured
Returns
-------
cuboid : ndarray
data array with removed aliens
"""
try:
size_threshold = config.AA1_size_threshold
except AttributeError:
size_threshold = 0.01
except Exception as e:
print ('error parsing AA1_size_threshold ', str(e))
try:
asym_threshold = config.AA1_asym_threshold
except AttributeError:
asym_threshold = 1.75
except Exception as e:
print ('error parsing AA1_asym_threshold ', str(e))
try:
min_pts = config.AA1_min_pts
except AttributeError:
min_pts = 5
except Exception as e:
print ('error parsing AA1_min_pts ', str(e))
try:
eps = config.AA1_eps
except AttributeError:
eps = 1.1
except Exception as e:
print ('error parsing AA1_eps ', str(e))
try:
threshold = config.AA1_amp_threshold
except AttributeError:
print ('AA1_amp_threshold parameter not configured, not removing aliens')
return data
except Exception as e:
print ('error parsing AA1_amp_threshold ', str(e))
try:
save_arrs = config.AA1_save_arrs
if save_arrs:
save_dir = os.path.join(data_dir, 'alien_analysis')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
except AttributeError:
save_arrs = False
except Exception as e:
print ('error parsing save_arrs ', str(e))
try:
expandcleanedsig = config.AA1_expandcleanedsigma
except AttributeError:
expandcleanedsig = 0.0
except Exception as e:
print ('error parsing expandcleanedsig ', str(e))
cuboid = crop_center(data)
cuboid = np.where(cuboid >= threshold, cuboid, 0)
if (save_arrs):
save_arr(cuboid, save_dir, "db%3.2f_cuboid%3.2f.tif" % (threshold, eps))
save_arr(cuboid[::-1, ::-1, ::-1], save_dir, "db%3.2f_cuboidrev%3.2f.tif" % (threshold, eps))
# the non_zero is a tuple of arrays, each array containing indices of elements that are non-zero along one axis.
non_zero = cuboid.nonzero()
# https://scikit-learn.org/stable/modules/clustering.html#dbscan
# labels is same size as input arr with a cluster label per point
iter = 0
nclusters = 0
finished = False
while ( not finished ):
non_zero = cuboid.nonzero()
# print("running db", iter)
labels = DBSCAN(eps=eps, metric='euclidean', min_samples=min_pts, n_jobs=-1).fit_predict(
np.array(non_zero).transpose().astype(np.float32))
# print("running analyze_clusters", iter)
arrs = analyze_clusters(cuboid, labels, non_zero)
if (save_arrs):
save_arrays(arrs, iter, threshold, eps, save_dir)
if nclusters == arrs[0]:
finished = True
nclusters = arrs[0]
if iter == 0: # keep values for all iterations
rel_cluster_size = arrs[2]
cluster_avg_asym = arrs[7]
# print("cleaning cuboid", iter)
cuboid = np.where(np.logical_and(rel_cluster_size < size_threshold, cluster_avg_asym > asym_threshold), 0.0, cuboid)
# print("iter", iter, nclusters)
iter += 1
if (expandcleanedsig > 0):
cuboid = np.where(cuboid > 0, 1.0, 0.0)
sig = [expandcleanedsig, expandcleanedsig, 1.0]
cuboid = ut.gauss_conv_fft(cuboid, sig)
no_thresh_cuboid = crop_center(data)
cuboid = np.where(cuboid > 0.1, no_thresh_cuboid, 0.0)
return cuboid
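# Usage sketch (illustrative only; the config object and file name are hypothetical).
# auto_alien1 only needs attribute-style access to the AA1_* options, so for ad-hoc
# experiments something like the following would work:
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(AA1_amp_threshold=6.0, AA1_size_threshold=0.01,
#                            AA1_asym_threshold=1.75, AA1_min_pts=5, AA1_eps=1.1)
#   data = tif.imread('experiment_data.tif')
#   cleaned = auto_alien1(data, config, data_dir='results')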
def remove_blocks(data, config_map):
"""
Sets to zero given alien blocks in the data array.
Parameters
----------
data : ndarray
an array with experiment data
config : Object
configuration object providing access to configuration parameters
Returns
-------
data : ndarray
data array with zeroed out aliens
"""
try:
aliens = config_map.aliens
for alien in aliens:
# The ImageJ swaps the x and y axis, so the aliens coordinates needs to be swapped, since ImageJ is used
# to find aliens
data[alien[0]:alien[3], alien[1]:alien[4], alien[2]:alien[5]] = 0
except AttributeError:
print ('aliens parameter not configured')
except Exception as e:
print ('did not remove aliens, error in aliens removal ', str(e))
return data
def filter_aliens(data, config_map):
"""
Sets to zero points in the data array defined by a file.
Parameters
----------
data : ndarray
an array with experiment data
config : Object
configuration object providing access to configuration parameters
Returns
-------
data : ndarray
data array with zeroed out aliens
"""
try:
alien_file = config_map.alien_file
if os.path.isfile(alien_file):
mask = np.load(alien_file)
for i in range(len(mask.shape)):
if mask.shape[i] != data.shape[i]:
print ('exiting, mask must be of the same shape as data:', data.shape)
return
data = np.where((mask==1), data, 0.0)
except AttributeError:
print ('alien_file parameter not configured')
except Exception as e:
print ('did not remove aliens, error in aliens removal ', str(e))
return data
def remove_aliens(data, config_map, data_dir=None):
"""
    Finds which algorithm is configured to remove the aliens and applies it to clean the data.
Parameters
----------
data : ndarray
an array with experiment data
config : Object
configuration object providing access to configuration parameters
data_dir : str
a directory where 'alien_analysis' subdirectory will be created to save results of analysis if configured
Returns
-------
data : ndarray
data array without aliens
"""
try:
algorithm = config_map.alien_alg
if algorithm == 'block_aliens':
data = remove_blocks(data, config_map)
elif algorithm == 'alien_file':
data = filter_aliens(data, config_map)
elif algorithm == 'AutoAlien1':
data = auto_alien1(data, config_map, data_dir)
elif algorithm != 'none':
print('not supported alien removal algorithm', algorithm)
except AttributeError:
pass
except Exception as e:
print ('did not remove aliens, error in aliens removal, error: ', str(e))
return data
## https://stackoverflow.com/questions/51503672/decorator-for-timeit-timeit-method/51503837#51503837
#from functools import wraps
#from time import time
#
#def measure(func):
# @wraps(func)
# def _time_it(*args, **kwargs):
# start = int(round(time() * 1000))
# try:
# return func(*args, **kwargs)
# finally:
# end_ = int(round(time() * 1000)) - start
# print(f"Total execution time: {end_ if end_ > 0 else 0} ms")
#
| 36.319475 | 165 | 0.625798 | [
"BSD-3-Clause"
] | AdvancedPhotonSource/cdisupp | scripts/alien_tools.py | 16,598 | Python |
#!/usr/bin/env python2
import rospy
from gnss_status_viewer import Status
from nmea_msgs.msg import Sentence
import sys
import copy
# Previous and current Status
prev = None
curr = None
def print_current_status(status):
"""
Prints the current status
:param status:
:return:
"""
print(status)
# Move to the beginning of the previous line
for i in range(str(status).count('\n') + 1):
sys.stdout.write('\033[F')
def nmea_cb(msg):
global prev
global curr
if prev is None:
prev = Status(msg.sentence)
return
curr = Status(msg.sentence)
if not curr.is_gga:
return
if prev != curr:
status_change = Status.get_status_change(prev, curr)
        for s in status_change:
            rospy.loginfo(s)
n = max(map(lambda line: len(line), status_change))
print(' ' * n)
print_current_status(curr)
prev = copy.deepcopy(curr)
rospy.init_node('gnss_status_viewer_node')
rospy.Subscriber('nmea_sentence', Sentence, nmea_cb)
rospy.spin()
| 19.351852 | 60 | 0.655502 | [
"MIT"
] | naoki-mizuno/gnss_status_viewer | nodes/gnss_status_viewer_node.py | 1,045 | Python |
'''
Generate uv position map of 300W_LP.
'''
import os, sys
import numpy as np
import scipy.io as sio
import random as ran
from skimage.transform import SimilarityTransform
from skimage import io, util
import skimage.transform
from time import time
import cv2
import matplotlib.pyplot as plt
sys.path.append('..')
import face3d
from face3d import mesh
from face3d.morphable_model import MorphabelModel
def process_uv(uv_coords, uv_h = 256, uv_w = 256):
uv_coords[:,0] = uv_coords[:,0]*(uv_w - 1)
uv_coords[:,1] = uv_coords[:,1]*(uv_h - 1)
uv_coords[:,1] = uv_h - uv_coords[:,1] - 1
uv_coords = np.hstack((uv_coords, np.zeros((uv_coords.shape[0], 1)))) # add z
return uv_coords
def run_posmap_300W_LP(bfm, image_path, mat_path, save_folder, uv_h = 256, uv_w = 256, image_h = 256, image_w = 256):
# 1. load image and fitted parameters
image_name = image_path.strip().split('/')[-1]
image = io.imread(image_path)/255;
[h, w, c] = image.shape;
info = sio.loadmat(mat_path);
pose_para = info['Pose_Para'].T.astype(np.float32);
shape_para = info['Shape_Para'].astype(np.float32);
exp_para = info['Exp_Para'].astype(np.float32);
# 2. generate mesh;
# generate shape
vertices = bfm.generate_vertices(shape_para, exp_para);
# transform mesh
s = pose_para[-1, 0];
angles = pose_para[:3, 0];
t = pose_para[3:6, 0];
transformed_vertices = bfm.transform_3ddfa(vertices, s, angles, t)
projected_vertices = transformed_vertices.copy() # using stantard camera & orth projection as in 3DDFA
image_vertices = projected_vertices.copy()
image_vertices[:,1] = h - image_vertices[:,1] - 1
# 3. crop image with key points
kpt = image_vertices[bfm.kpt_ind, :].astype(np.int32)
left = np.min(kpt[:, 0])
right = np.max(kpt[:, 0])
top = np.min(kpt[:, 1])
bottom = np.max(kpt[:, 1])
center = np.array([right - (right - left) / 2.0,
bottom - (bottom - top) / 2.0])
old_size = (right - left + bottom - top)/2
size = int(old_size*1.5)
    # random perturbation. you can change the numbers
marg = old_size*0.1
t_x = np.random.rand()*marg*2 - marg
t_y = np.random.rand()*marg*2 - marg
center[0] = center[0]+t_x;
center[1] = center[1]+t_y
size = size*(np.random.rand()*0.2 + 0.9)
# crop and record the transform parameters
src_pts = np.array([[center[0]-size/2, center[1]-size/2], [center[0] - size/2, center[1]+size/2], [center[0]+size/2, center[1]-size/2]]);
DST_PTS = np.array([[0, 0], [0, image_h - 1], [image_w - 1, 0]]);
tform = skimage.transform.estimate_transform('similarity', src_pts, DST_PTS);
# transform face position(image vertices) along with 2d facial image
angle = np.random.rand() * 90 - 45;
rows, cols = image.shape[0], image.shape[1];
# rotation around center
center = np.array((cols, rows)) / 2. - 0.5;
tform1 = SimilarityTransform(translation=center);
tform2 = SimilarityTransform(rotation=np.deg2rad(angle));
tform3 = SimilarityTransform(translation=-center);
rotate_transform = tform3 + tform2 + tform1;
tform = rotate_transform + tform;
opt = ran.randint(1,2);
cropped_image = skimage.transform.warp(image, tform.inverse, output_shape=(image_h, image_w));
position = image_vertices.copy()
position[:, 2] = 1
position = np.dot(position, tform.params.T)
position[:, 2] = image_vertices[:, 2]*tform.params[0, 0] # scale z
position[:, 2] = position[:, 2] - np.min(position[:, 2]) # translate z
# 4. uv position map: render position in uv space
uv_position_map = mesh.render.render_colors(uv_coords, bfm.full_triangles, position, uv_h, uv_w, c = 3)
#cv2.imshow('image', cropped_image);
#cv2.waitKey(0);
#cv2.destroyAllWindows();
# 5. save files
io.imsave('{}\{}'.format(save_folder, image_name), np.squeeze(cropped_image));
np.save('{}\{}'.format(save_folder, image_name.replace('jpg', 'npy')), uv_position_map);
io.imsave('{}\{}'.format('results/uv_maps/', image_name.replace('.jpg', '_posmap.jpg')), (uv_position_map)/abs(uv_position_map.max())); # only for show
#cv2.imwrite(image_name[:-4]+'_posmap.jpg',uv_position_map);
# --verify
#import cv2
uv_texture_map_rec = cv2.remap(cropped_image, uv_position_map[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR,borderMode= cv2.BORDER_CONSTANT,borderValue=(0));
#io.imsave('{}\{}'.format(save_folder, image_name.replace('.jpg', '_tex.jpg')), np.squeeze(uv_texture_map_rec)); #Save fitted face on position map (texture).
if __name__ == '__main__':
save_folder = 'results/'
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# set para
uv_h = uv_w = 256
image_h = image_w = 256;
# load uv coords
global uv_coords
uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat')
uv_coords = process_uv(uv_coords, uv_h, uv_w)
'''
Save LFPW Testing as well, only the first 8.
'''
# load bfm
bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
# run
content = [];
print('Running');
s =0;
f=0;
types = ['AFW', 'HELEN', 'LFPW', 'IBUG', 'LFPW_Test'];
for i in types:
print(i);
with open(i+'_Data.txt', 'r') as fileRead:
content = [file.rstrip('\n') for file in fileRead];
s=0;
print(len(content));
for filename in content:
#print(filename)
#if(s==8 and i is 'LFPW_Test'):
# break
if(s%500 ==0):
print(str(s) +'/' +str(len(content)))
image_path = 'Data/BFM/300W_LP/'+ i+'/'+filename+'.jpg';
mat_path = 'Data/BFM/300W_LP/'+i+'/'+filename+'.mat';
        if(i == 'LFPW_Test'):  # compare string values with '=='; 'is' tests object identity
image_path = 'Data/BFM/300W_LP/'+ 'LFPW'+'/'+filename+'.jpg';
mat_path = 'Data/BFM/300W_LP/'+'LFPW'+'/'+filename+'.mat';
run_posmap_300W_LP(bfm, image_path, mat_path, save_folder)
s+=1;
print(s+f)
print(f)
| 41.294521 | 180 | 0.636092 | [
"MIT"
] | Nnemr/PRNet | get_300WLP_maps.py | 6,029 | Python |
from comet_ml import Experiment
experiment = Experiment(api_key="oda8KKpxlDgWmJG5KsYrrhmIV", project_name="consensusnet")
import numpy as np
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input
from keras.layers import Conv1D, MaxPooling1D, Conv2D
import sys
module_path = '/home/diplomski-rad/consensus-net/src/python/dataset/'
if module_path not in sys.path:
print('Adding dataset module.')
sys.path.append(module_path)
import dataset
X_train = np.load('../dataset-n3-X-reshaped-train.npy')
X_validate = np.load('../dataset-n3-X-reshaped-validate.npy')
y_train = np.load('../dataset-n3-y-reshaped-train.npy')
y_validate = np.load('../dataset-n3-y-reshaped-validate.npy')
example_shape = X_train.shape[1:]
input_layer = Input(shape=example_shape)
conv_1 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(input_layer)
bn_1 = BatchNormalization()(conv_1)
conv_2 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(bn_1)
drop_1 = Dropout(0.25)(conv_2)
flatten = Flatten()(drop_1)
predictions = Dense(4, activation='softmax')(flatten)
model = Model(input_layer, predictions)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
batch_size = 10000
epochs = 50
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_validate, y_validate))
| 32.777778 | 107 | 0.753898 | [
"MIT"
] | ajuric/consensus-net | experiments/karla/diplomski-rad/blade/pb/datasets/n3-all-indels/finished-experiments/model-n3-indel-5.py | 1,475 | Python |
# Generated by Django 3.1.1 on 2022-01-06 23:38
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='ControleOTP',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('email', models.EmailField(max_length=254)),
('codigo', models.CharField(max_length=6)),
],
),
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('email', models.EmailField(max_length=254)),
('codigo', models.CharField(max_length=6)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 60.436364 | 330 | 0.636282 | [
"MIT"
] | JGabriel-AbreuM/Pokedex | pokedex/accounts/migrations/0001_initial.py | 3,324 | Python |
# coding=utf-8
# Copyright 2020 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensemble on ImageNet.
This script only performs evaluation, not training. We recommend training
ensembles by launching independent runs of `deterministic.py` over different
seeds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import deterministic_model # local file import
import utils # local file import
import numpy as np
import tensorflow.compat.v2 as tf
flags.DEFINE_integer('per_core_batch_size', 512, 'Batch size per TPU core/GPU.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.mark_flag_as_required('data_dir')
flags.DEFINE_string('checkpoint_dir', None,
'The directory where the model weights are stored.')
flags.mark_flag_as_required('checkpoint_dir')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
'The directory where to save predictions.')
flags.DEFINE_string('alexnet_errors_path', None,
'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def ensemble_negative_log_likelihood(labels, logits):
"""Negative log-likelihood for ensemble.
For each datapoint (x,y), the ensemble's negative log-likelihood is:
```
-log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +
log ensemble_size.
```
Args:
labels: tf.Tensor of shape [...].
logits: tf.Tensor of shape [ensemble_size, ..., num_classes].
Returns:
tf.Tensor of shape [...].
"""
labels = tf.cast(labels, tf.int32)
logits = tf.convert_to_tensor(logits)
ensemble_size = float(logits.shape[0])
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1]),
logits)
return -tf.reduce_logsumexp(-nll, axis=0) + tf.math.log(ensemble_size)
def gibbs_cross_entropy(labels, logits):
"""Average cross entropy for ensemble members (Gibbs cross entropy).
For each datapoint (x,y), the ensemble's Gibbs cross entropy is:
```
GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).
```
The Gibbs cross entropy approximates the average cross entropy of a single
model drawn from the (Gibbs) ensemble.
Args:
labels: tf.Tensor of shape [...].
logits: tf.Tensor of shape [ensemble_size, ..., num_classes].
Returns:
tf.Tensor of shape [...].
"""
labels = tf.cast(labels, tf.int32)
logits = tf.convert_to_tensor(logits)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1]),
logits)
return tf.reduce_mean(nll, axis=0)
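# Shape sketch (illustrative only): with an ensemble of 4 members, a batch of 32 images
# and 1000 classes, `logits` has shape [4, 32, 1000] and `labels` has shape [32]; both
# ensemble_negative_log_likelihood and gibbs_cross_entropy then return a tensor of shape
# [32], which is averaged with tf.reduce_mean in the evaluation loop below.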
def main(argv):
del argv # unused arg
if not FLAGS.use_gpu:
raise ValueError('Only GPU is currently supported.')
if FLAGS.num_cores > 1:
raise ValueError('Only a single accelerator is currently supported.')
tf.enable_v2_behavior()
tf.random.set_seed(FLAGS.seed)
tf.io.gfile.makedirs(FLAGS.output_dir)
batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
dataset_test = utils.ImageNetInput(
is_training=False,
data_dir=FLAGS.data_dir,
batch_size=FLAGS.per_core_batch_size,
use_bfloat16=False).input_fn()
test_datasets = {'clean': dataset_test}
corruption_types, max_intensity = utils.load_corrupted_test_info()
for name in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset_name = '{0}_{1}'.format(name, intensity)
test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
name=name,
intensity=intensity,
batch_size=FLAGS.per_core_batch_size,
drop_remainder=True,
use_bfloat16=False)
model = deterministic_model.resnet50(input_shape=(224, 224, 3),
num_classes=NUM_CLASSES)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Search for checkpoints from their index file; then remove the index suffix.
ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
'**/*.index'))
ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
ensemble_size = len(ensemble_filenames)
logging.info('Ensemble size: %s', ensemble_size)
logging.info('Ensemble number of weights: %s',
ensemble_size * model.count_params())
logging.info('Ensemble filenames: %s', str(ensemble_filenames))
checkpoint = tf.train.Checkpoint(model=model)
# Write model predictions to files.
num_datasets = len(test_datasets)
for m, ensemble_filename in enumerate(ensemble_filenames):
checkpoint.restore(ensemble_filename)
for n, (name, test_dataset) in enumerate(test_datasets.items()):
filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
filename = os.path.join(FLAGS.output_dir, filename)
if not tf.io.gfile.exists(filename):
logits = []
test_iterator = iter(test_dataset)
for _ in range(steps_per_eval):
features, _ = next(test_iterator) # pytype: disable=attribute-error
logits.append(model(features, training=False))
logits = tf.concat(logits, axis=0)
with tf.io.gfile.GFile(filename, 'w') as f:
np.save(f, logits.numpy())
percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
'Dataset {:d}/{:d}'.format(percent,
m + 1,
ensemble_size,
n + 1,
num_datasets))
logging.info(message)
metrics = {
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
}
corrupt_metrics = {}
for name in test_datasets:
corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
corrupt_metrics['test/accuracy_{}'.format(name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(
name)] = ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
# Evaluate model predictions.
for n, (name, test_dataset) in enumerate(test_datasets.items()):
logits_dataset = []
for m in range(ensemble_size):
filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
filename = os.path.join(FLAGS.output_dir, filename)
with tf.io.gfile.GFile(filename, 'rb') as f:
logits_dataset.append(np.load(f))
logits_dataset = tf.convert_to_tensor(logits_dataset)
test_iterator = iter(test_dataset)
for step in range(steps_per_eval):
_, labels = next(test_iterator) # pytype: disable=attribute-error
logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
negative_log_likelihood = tf.reduce_mean(
ensemble_negative_log_likelihood(labels, logits))
per_probs = tf.nn.softmax(logits)
probs = tf.reduce_mean(per_probs, axis=0)
if name == 'clean':
gibbs_ce = tf.reduce_mean(gibbs_cross_entropy(labels, logits))
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].update_state(labels, probs)
else:
corrupt_metrics['test/nll_{}'.format(name)].update_state(
negative_log_likelihood)
corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
labels, probs)
corrupt_metrics['test/ece_{}'.format(name)].update_state(
labels, probs)
message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
(n + 1) / num_datasets, n + 1, num_datasets))
logging.info(message)
corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
corruption_types,
max_intensity,
FLAGS.alexnet_errors_path)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
logging.info('Metrics: %s', total_results)
if __name__ == '__main__':
app.run(main)
| 39.58498 | 80 | 0.674988 | [
"Apache-2.0"
] | mhavasi/edward2 | baselines/imagenet/ensemble.py | 10,015 | Python |
from collections import defaultdict
from .tree import Tree
from .visitors import Transformer_InPlace
from .common import ParserConf
from .lexer import Token, PatternStr
from .parsers import earley
from .grammar import Rule, Terminal, NonTerminal
def is_discarded_terminal(t):
return t.is_term and t.filter_out
def is_iter_empty(i):
try:
_ = next(i)
return False
except StopIteration:
return True
class WriteTokensTransformer(Transformer_InPlace):
def __init__(self, tokens):
self.tokens = tokens
def __default__(self, data, children, meta):
# if not isinstance(t, MatchTree):
# return t
if not getattr(meta, 'match_tree', False):
return Tree(data, children)
iter_args = iter(children)
to_write = []
for sym in meta.orig_expansion:
if is_discarded_terminal(sym):
t = self.tokens[sym.name]
if not isinstance(t.pattern, PatternStr):
raise NotImplementedError("Reconstructing regexps not supported yet: %s" % t)
to_write.append(t.pattern.value)
else:
x = next(iter_args)
if isinstance(x, list):
to_write += x
else:
if isinstance(x, Token):
assert Terminal(x.type) == sym, x
else:
assert NonTerminal(x.data) == sym, (sym, x)
to_write.append(x)
assert is_iter_empty(iter_args)
return to_write
class MatchTree(Tree):
pass
class MakeMatchTree:
def __init__(self, name, expansion):
self.name = name
self.expansion = expansion
def __call__(self, args):
t = MatchTree(self.name, args)
t.meta.match_tree = True
t.meta.orig_expansion = self.expansion
return t
class Reconstructor:
def __init__(self, parser):
# XXX TODO calling compile twice returns different results!
tokens, rules, _grammar_extra = parser.grammar.compile()
self.write_tokens = WriteTokensTransformer({t.name:t for t in tokens})
self.rules = list(self._build_recons_rules(rules))
def _build_recons_rules(self, rules):
expand1s = {r.origin for r in rules if r.options and r.options.expand1}
aliases = defaultdict(list)
for r in rules:
if r.alias:
aliases[r.origin].append( r.alias )
rule_names = {r.origin for r in rules}
nonterminals = {sym for sym in rule_names
if sym.name.startswith('_') or sym in expand1s or sym in aliases }
for r in rules:
recons_exp = [sym if sym in nonterminals else Terminal(sym.name)
for sym in r.expansion if not is_discarded_terminal(sym)]
# Skip self-recursive constructs
if recons_exp == [r.origin]:
continue
sym = NonTerminal(r.alias) if r.alias else r.origin
yield Rule(sym, recons_exp, alias=MakeMatchTree(sym.name, r.expansion))
for origin, rule_aliases in aliases.items():
for alias in rule_aliases:
yield Rule(origin, [Terminal(alias)], alias=MakeMatchTree(origin.name, [NonTerminal(alias)]))
yield Rule(origin, [Terminal(origin.name)], alias=MakeMatchTree(origin.name, [origin]))
def _match(self, term, token):
if isinstance(token, Tree):
return Terminal(token.data) == term
elif isinstance(token, Token):
return term == Terminal(token.type)
assert False
def _reconstruct(self, tree):
# TODO: ambiguity?
callbacks = {rule: rule.alias for rule in self.rules} # TODO pass callbacks through dict, instead of alias?
parser = earley.Parser(ParserConf(self.rules, callbacks, tree.data), self._match, resolve_ambiguity=True)
unreduced_tree = parser.parse(tree.children) # find a full derivation
assert unreduced_tree.data == tree.data
res = self.write_tokens.transform(unreduced_tree)
for item in res:
if isinstance(item, Tree):
for x in self._reconstruct(item):
yield x
else:
yield item
def reconstruct(self, tree):
return ''.join(self._reconstruct(tree))
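# Usage sketch (illustrative only; the grammar and input text are hypothetical):
#
#   from lark import Lark
#   parser = Lark(grammar_text, start='start')
#   tree = parser.parse(some_text)
#   round_tripped = Reconstructor(parser).reconstruct(tree)
#
# Discarded terminals such as ignored whitespace are not preserved, so round_tripped is
# equivalent to, but not necessarily byte-identical with, the original input.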
| 34.294574 | 117 | 0.6033 | [
"MIT"
] | larryk85/eosio.depman | lark/reconstruct.py | 4,424 | Python |
"""py.test fixtures for Pyramid.
http://pyramid.readthedocs.org/en/latest/narr/testing.html
"""
import datetime as datetime_module
import logging
import os
import pkg_resources
import pytest
import webtest
from dcicutils.qa_utils import notice_pytest_fixtures, MockFileSystem
from pyramid.request import apply_request_extensions
from pyramid.testing import DummyRequest
from pyramid.threadlocal import get_current_registry, manager as threadlocal_manager
from snovault import DBSESSION, ROOT, UPGRADER
from snovault.elasticsearch import ELASTIC_SEARCH, create_mapping
from snovault.util import generate_indexer_namespace_for_testing
from .conftest_settings import make_app_settings_dictionary
from .. import main
from ..loadxl import load_all
"""
README:
* This file contains application-level fixtures and hooks into the server/data fixtures present in
    other files.
* There are "app" based fixtures that rely only on postgres, and "es_app" fixtures that
    use both postgres and ES (for search/ES related testing).
"""
@pytest.fixture(autouse=True)
def autouse_external_tx(external_tx):
pass
@pytest.fixture(scope='session')
def app_settings(request, wsgi_server_host_port, conn, DBSession): # noQA - We didn't choose the fixture name.
notice_pytest_fixtures(request, wsgi_server_host_port, conn, DBSession)
settings = make_app_settings_dictionary()
settings['auth0.audiences'] = 'http://%s:%s' % wsgi_server_host_port
settings[DBSESSION] = DBSession
return settings
INDEXER_NAMESPACE_FOR_TESTING = generate_indexer_namespace_for_testing('cgap')
@pytest.fixture(scope='session')
def es_app_settings(wsgi_server_host_port, elasticsearch_server, postgresql_server, aws_auth):
settings = make_app_settings_dictionary()
settings['create_tables'] = True
settings['persona.audiences'] = 'http://%s:%s' % wsgi_server_host_port # 2-tuple such as: ('localhost', '5000')
settings['elasticsearch.server'] = elasticsearch_server
settings['sqlalchemy.url'] = postgresql_server
settings['collection_datastore'] = 'elasticsearch'
settings['item_datastore'] = 'elasticsearch'
settings['indexer'] = True
settings['indexer.namespace'] = INDEXER_NAMESPACE_FOR_TESTING
# use aws auth to access elasticsearch
if aws_auth:
settings['elasticsearch.aws_auth'] = aws_auth
return settings
def pytest_configure():
logging.basicConfig(format='%(message)s')
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
class Shorten(logging.Filter):
max_len = 500
def filter(self, record):
if record.msg == '%r':
record.msg = record.msg % record.args
record.args = ()
if len(record.msg) > self.max_len:
record.msg = record.msg[:self.max_len] + '...'
return True
logging.getLogger('sqlalchemy.engine.base.Engine').addFilter(Shorten())
@pytest.yield_fixture
def threadlocals(request, dummy_request, registry):
notice_pytest_fixtures(request, dummy_request, registry)
threadlocal_manager.push({'request': dummy_request, 'registry': registry})
yield dummy_request
threadlocal_manager.pop()
class MyDummyRequest(DummyRequest):
def remove_conditional_headers(self):
pass
def _get_registry(self):
if self._registry is None:
return get_current_registry()
return self._registry
def _set_registry(self, registry):
self.__dict__['registry'] = registry
def _del_registry(self):
self._registry = None
registry = property(_get_registry, _set_registry, _del_registry)
@pytest.fixture
def dummy_request(root, registry, app):
request = app.request_factory.blank('/dummy')
request.root = root
request.registry = registry
request._stats = {}
request.invoke_subrequest = app.invoke_subrequest
apply_request_extensions(request)
return request
@pytest.fixture(scope='session')
def app(app_settings):
""" WSGI application level functional testing. """
return main({}, **app_settings)
@pytest.fixture(scope='session')
def es_app(es_app_settings, **kwargs):
"""
App that uses both Postgres and ES - pass this as "app" argument to TestApp.
Pass all kwargs onto create_mapping
"""
app = main({}, **es_app_settings)
create_mapping.run(app, **kwargs)
return app
@pytest.fixture
def registry(app):
return app.registry
@pytest.fixture
def elasticsearch(registry):
return registry[ELASTIC_SEARCH]
@pytest.fixture
def upgrader(registry):
return registry[UPGRADER]
@pytest.fixture
def root(registry):
return registry[ROOT]
# Available Fixtures
# ------------------
#
# ################## +-----------------------------------------+----------------------------------------------------+
# ################## | Basic Application | Application with ES + Postgres |
# ################## +-----------------------+-----------------+---------------------------+------------------------+
# ################## | JSON content | HTML content | JSON content | HTML content |
# -------------------+-----------------------+-----------------+---------------------------+------------------------+
# Anonymous User | anontestapp | anonhtmltestapp | anon_es_testapp | anon_html_es_testapp |
# -------------------+-----------------------+-----------------+---------------------------+------------------------+
# System User | testapp | htmltestapp | es_testapp | html_es_testapp |
# -------------------+-----------------------+-----------------+---------------------------+------------------------+
# Authenticated User | authenticated_testapp | ----- | authenticated_es_testapp | ----- |
# -------------------+-----------------------+-----------------+---------------------------+------------------------+
# Submitter User | submitter_testapp | ----- | ----- | ----- |
# -------------------+-----------------------+-----------------+---------------------------+------------------------+
# Indexer User | ----- | ----- | indexer_testapp | ----- |
# -------------------+-----------------------+-----------------+---------------------------+------------------------+
# Embed User | embed_testapp | ----- | ----- | ----- |
# -------------------+-----------------------+-----------------+---------------------------+------------------------+
#
# TODO: Reconsider naming to have some underscores interspersed for better readability.
# e.g., html_testapp rather than htmltestapp, and especially anon_html_test_app rather than anonhtmltestapp.
# -kmp 03-Feb-2020
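# Hypothetical usage sketch (not part of this module): a test simply names one of the
# fixtures from the table above as an argument and webtest does the rest, e.g.
#
#     def test_home_page_returns_json(testapp):
#         res = testapp.get('/', status=200)  # '/' is only an illustrative route
#         assert res.content_type == 'application/json'
#
# The fixture matrix above is the authoritative list; the route and assertion here are
# assumptions for illustration only.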
@pytest.fixture
def anontestapp(app):
"""TestApp for anonymous user (i.e., no user specified), accepting JSON data."""
environ = {
'HTTP_ACCEPT': "application/json"
}
return webtest.TestApp(app, environ)
@pytest.fixture
def anonhtmltestapp(app):
"""TestApp for anonymous (not logged in) user, accepting text/html content."""
environ = {
'HTTP_ACCEPT': 'text/html'
}
test_app = webtest.TestApp(app, environ)
return test_app
@pytest.fixture
def anon_es_testapp(es_app):
""" TestApp simulating a bare Request entering the application (with ES enabled) """
environ = {
'HTTP_ACCEPT': 'application/json'
}
return webtest.TestApp(es_app, environ)
@pytest.fixture
def anon_html_es_testapp(es_app):
"""TestApp with ES + Postgres for anonymous (not logged in) user, accepting text/html content."""
environ = {
'HTTP_ACCEPT': 'text/html'
}
return webtest.TestApp(es_app, environ)
@pytest.fixture(scope="session")
def testapp(app):
"""TestApp for username TEST, accepting JSON data."""
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST'
}
return webtest.TestApp(app, environ)
@pytest.fixture
def htmltestapp(app):
"""TestApp for TEST user, accepting text/html content."""
environ = {
'HTTP_ACCEPT': 'text/html',
'REMOTE_USER': 'TEST',
}
test_app = webtest.TestApp(app, environ)
return test_app
@pytest.fixture(scope='session')
def es_testapp(es_app):
""" TestApp with ES + Postgres. Must be imported where it is needed. """
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST',
}
return webtest.TestApp(es_app, environ)
@pytest.fixture
def html_es_testapp(es_app):
"""TestApp with ES + Postgres for TEST user, accepting text/html content."""
environ = {
'HTTP_ACCEPT': 'text/html',
'REMOTE_USER': 'TEST',
}
return webtest.TestApp(es_app, environ)
@pytest.fixture
def authenticated_testapp(app):
"""TestApp for an authenticated, non-admin user (TEST_AUTHENTICATED), accepting JSON data."""
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST_AUTHENTICATED',
}
return webtest.TestApp(app, environ)
@pytest.fixture
def authenticated_es_testapp(es_app):
""" TestApp for authenticated non-admin user with ES """
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST_AUTHENTICATED',
}
return webtest.TestApp(es_app, environ)
@pytest.fixture
def submitter_testapp(app):
"""TestApp for a non-admin user (TEST_SUBMITTER), accepting JSON data."""
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST_SUBMITTER',
}
return webtest.TestApp(app, environ)
@pytest.fixture
def indexer_testapp(es_app):
""" Indexer testapp, meant for manually triggering indexing runs by posting to /index.
Always uses the ES app (obviously, but not so obvious previously) """
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'INDEXER',
}
return webtest.TestApp(es_app, environ)
@pytest.fixture
def embed_testapp(app):
"""TestApp for user EMBED, accepting JSON data."""
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'EMBED',
}
return webtest.TestApp(app, environ)
@pytest.fixture
def wsgi_app(wsgi_server):
"""TestApp for WSGI server."""
return webtest.TestApp(wsgi_server)
class WorkbookCache:
""" Caches whether or not we have already provisioned the workbook. """
done = None
@classmethod
def initialize_if_needed(cls, es_app):
if not cls.done:
cls.done = cls.make_fresh_workbook(es_app)
@classmethod
def make_fresh_workbook(cls, es_app):
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST',
}
testapp = webtest.TestApp(es_app, environ)
# Just load the workbook inserts
# Note that load_all returns None for success or an Exception on failure.
load_res = load_all(testapp, pkg_resources.resource_filename('encoded', 'tests/data/workbook-inserts/'), [])
if isinstance(load_res, Exception):
raise load_res
elif load_res:
raise RuntimeError("load_all returned a true value that was not an exception.")
testapp.post_json('/index', {})
return True
@pytest.fixture(scope='session')
def workbook(es_app):
""" Loads a bunch of data (tests/data/workbook-inserts) into the system on first run
(session scope doesn't work). """
WorkbookCache.initialize_if_needed(es_app)
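# Hypothetical usage sketch: a search-related test would typically request both the
# `workbook` and `es_testapp` fixtures so the inserts are indexed before querying, e.g.
#
#     def test_search_returns_workbook_items(workbook, es_testapp):
#         res = es_testapp.get('/search/?type=Item', status=200)  # assumed URL/type
#         assert res.json['@graph']
#
# The search URL, item type, and response shape are assumptions for illustration only.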
@pytest.yield_fixture
def mocked_file_system():
with MockFileSystem(auto_mirror_files_for_read=True).mock_exists_open_remove():
yield
| 32.627397 | 118 | 0.605508 | [
"MIT"
] | dbmi-bgm/cgap-portal | src/encoded/tests/conftest.py | 11,909 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# fmt: off
def build_get_method_local_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2.0"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/azurespecials/apiVersion/method/string/none/query/local/2.0')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_method_local_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/azurespecials/apiVersion/method/string/none/query/local/null')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if api_version is not None:
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_path_local_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2.0"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/azurespecials/apiVersion/path/string/none/query/local/2.0')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_swagger_local_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2.0"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/azurespecials/apiVersion/swagger/string/none/query/local/2.0')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class ApiVersionLocalOperations(object):
"""ApiVersionLocalOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azurespecialproperties.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_method_local_valid(
self, **kwargs # type: Any
):
# type: (...) -> None
"""Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_method_local_valid_request(
template_url=self.get_method_local_valid.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
get_method_local_valid.metadata = {"url": "/azurespecials/apiVersion/method/string/none/query/local/2.0"} # type: ignore
@distributed_trace
def get_method_local_null(
self,
api_version=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Get method with api-version modeled in the method. pass in api-version = null to succeed.
:param api_version: This should appear as a method parameter, use value null, this should
result in no serialized parameter.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_method_local_null_request(
api_version=api_version,
template_url=self.get_method_local_null.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
get_method_local_null.metadata = {"url": "/azurespecials/apiVersion/method/string/none/query/local/null"} # type: ignore
@distributed_trace
def get_path_local_valid(
self, **kwargs # type: Any
):
# type: (...) -> None
"""Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_path_local_valid_request(
template_url=self.get_path_local_valid.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
get_path_local_valid.metadata = {"url": "/azurespecials/apiVersion/path/string/none/query/local/2.0"} # type: ignore
@distributed_trace
def get_swagger_local_valid(
self, **kwargs # type: Any
):
# type: (...) -> None
"""Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_swagger_local_valid_request(
template_url=self.get_swagger_local_valid.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
get_swagger_local_valid.metadata = {"url": "/azurespecials/apiVersion/swagger/string/none/query/local/2.0"} # type: ignore
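# Hypothetical usage sketch: per the class docstring, this operation group is not
# instantiated directly but is reached through the generated service client, roughly:
#
#     client = ...  # the generated client from this package, however it is constructed
#     client.api_version_local.get_method_local_valid()   # sends api-version=2.0
#     client.api_version_local.get_method_local_null()    # omits api-version entirely
#
# The attribute name `api_version_local` is an assumption based on this class name and
# AutoRest conventions, not something stated in this file.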
| 39.019231 | 127 | 0.672334 | [
"MIT"
] | Azure/autorest.python | test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_api_version_local_operations.py | 12,174 | Python |
import os
from glob import glob
from tqdm import tqdm
from pathlib import Path
# from kaggle_isic_2020.lib import dirs # Doesn't work on unix, why?
# Test
source_dir = "/home/common/datasets/SIIM-ISIC_2020_Melanoma/jpeg/test/"
dest_dir = "/home/common/datasets/SIIM-ISIC_2020_Melanoma/jpeg/test_compact/"
# dirs.create_folder(dest_dir)
file_list = glob(source_dir+"*.jpg")
print("Converting image files...")
for file_path in tqdm(file_list):
file_name = Path(file_path).name
os.system("convert \""+file_path+"[512x]\" -set filename:base \"%[basename]\" \""+dest_dir+"/%[filename:base].jpg\"")
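# A minimal alternative sketch using subprocess instead of os.system (assumes the
# ImageMagick `convert` binary is on PATH and mirrors the call above):
#
#     import subprocess
#     subprocess.run([
#         "convert", file_path + "[512x]",
#         "-set", "filename:base", "%[basename]",
#         dest_dir + "/%[filename:base].jpg",
#     ], check=True)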
| 33.666667 | 121 | 0.735974 | [
"MIT"
] | olavosamp/kaggle_isic_2020 | dataset_stats/convert_dataset_test_set.py | 606 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import pdb
import numpy as np
from os.path import join
from ops import l2_dist_360
from MeanOverlap import MeanOverlap
def catData(totalData, newData):
""" Concat data from scratch """
if totalData is None:
totalData = newData[np.newaxis].copy()
else:
totalData = np.concatenate((totalData, newData[np.newaxis]))
return totalData
def score(Agent, seq1, seq2, _full=True):
""" Calculate IoU """
acc = 0.0
total_num = 0
MO = MeanOverlap(Agent.W, Agent.H)
for batch in xrange(Agent.batch_size):
for i in xrange(Agent.n_frames):
if not _full and np.sum(seq2[batch][i]) == 0:
continue
acc += MO.IOU((seq1[batch, i, 0], seq1[batch, i, 1]), (seq2[batch, i, 0], seq2[batch, i, 1]))
total_num += 1
return (acc / total_num) if total_num != 0 else 0 #(n_frames*batch_size)
def printAcc(threshold, targetFrNum, totalFrNum):
""" Fetch accuracy and print out """
print "Acc is:"
for th in threshold:
print ("%d" %(th)),
print
for i, types in enumerate(targetFrNum):
print i if i < 4 else (i-4),
for j, th in enumerate(threshold):
print ("%.5f" %(types[j] / (totalFrNum if totalFrNum > 0 else 1))),
print
def cal_accuracy(Agent, pred, gt, targetFrNum, totalFrNum):
""" Calculate and return accuracy """
if np.sum(gt) == 0:
return targetFrNum, totalFrNum
l2_dist = l2_dist_360(pred, gt, Agent.W)
l2_dist = np.tile(l2_dist,(len(Agent.threshold), 1))
""" if l2_dist(10 x 50) <= thres(1 x 10), then targetFrNum(8types x 10thres) += 1 """
thres = np.sum(l2_dist <= np.tile(np.reshape(Agent.threshold, (-1, 1)), (1, l2_dist.shape[-1])), axis=1)
center = np.array([Agent.W/2, Agent.H/2])
for th, i in enumerate(thres):
if np.min(np.linalg.norm(gt - center, axis=1)) > 100:
targetFrNum[i,th] += 1
else:
targetFrNum[i+4,th] += 1
totalFrNum += 1
return targetFrNum, totalFrNum
def load_batch_data(Agent, path, num_batch, _copy=False, _augment=False):
""" Load batch data from path and normalize them, use copy to preserve raw data """
data = np.load(join(path, 'pruned_roisavg/batch_{}.npy'.format(num_batch))) #[0:Agent.batch_size,0:Agent.n_frames,0:Agent.n_detection,0:Agent.n_input]
oracle_viewangle = np.load(join(path, 'label/batch_{}.npy'.format(num_batch))) #[0:Agent.batch_size,0:Agent.n_frames,0:Agent.n_classes+1]
one_hot_labels = np.load(join(path, 'onehot/batch_{}.npy'.format(num_batch))) #[0:Agent.batch_size,0:Agent.n_frames,0:Agent.n_detection]
hof = np.load(join(path, 'hof/batch_{}.npy'.format(num_batch))) #[0:Agent.batch_size,0:Agent.n_frames,0:Agent.n_detection,0:Agent.n_bin_size]
box_center = np.load(join(path, 'divide_area_pruned_boxes/batch_{}.npy'.format(num_batch))) #[0:Agent.batch_size,0:Agent.n_frames,0:Agent.n_detection,0:]
img = np.zeros((Agent.batch_size), dtype=np.float16)
if _augment is True:
data, oracle_viewangle, box_center = augment_data(data, oracle_viewangle, box_center)
if _copy is True:
box = np.copy(box_center)
gt = np.copy(oracle_viewangle)[:, :, :2]
else:
box = None
gt = None
box_center[:,:,:,0] = (box_center[:,:,:,0]/Agent.W + box_center[:,:,:,2]/Agent.W)/2
box_center[:,:,:,1] = (box_center[:,:,:,1]/Agent.H + box_center[:,:,:,3]/Agent.H)/2
box_center = box_center[:, :, :, :2]
oracle_viewangle[:,:,0] = oracle_viewangle[:,:,0]/Agent.W
oracle_viewangle[:,:,1] = oracle_viewangle[:,:,1]/Agent.H
oracle_viewangle = oracle_viewangle[:, :, :2]
return data, one_hot_labels, oracle_viewangle, box_center, hof, img, box, gt
def visual_gaze(Agent, img_name, gt, pred, alphas, box):
"""
[Deprecated]
    Draw and plot the visual gaze: boxes, ground-truth gazes, and the prediction
"""
print Agent.img_path + img_name + '.jpg'
img = cv2.imread(Agent.img_path + img_name + '.jpg',3)
if img is None:
print 'No image is found.'
return 1
    W = Agent.W
    H = Agent.H
    img = cv2.resize(img, (int(W), int(H)))
# Box
idx = 0
transparent = 0.90
for xmin, ymin, xmax, ymax in box.astype(np.int32):
if xmax > W: xmax = int(W)
if ymax > H: ymax = int(H)
print xmin, ymin, xmax, ymax, alphas[idx]
#if alphas[idx] > 0.0:
cv2.rectangle(img,(xmin, ymin),(xmax, ymax), (255,255,255), 2)
img[ymin:ymax,xmin:xmax,:] = img[ymin:ymax,xmin:xmax,:]*0.95 + np.ones((ymax-ymin,xmax-xmin,3))*0.05
cv2.putText(img, ("{0:.2f}").format(alphas[idx]), (int((xmax+xmin)/2)+1 , int((ymax+ymin)/2)+1), cv2.FONT_HERSHEY_SIMPLEX, 1.50, (0,0,0), 2)
cv2.putText(img, ("{0:.2f}").format(alphas[idx]), (int((xmax+xmin)/2) , int((ymax+ymin)/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.50, (255,255,255), 2)
idx += 1
# Predicted gaze
ll = 3
# Desire gaze
    color = [(255, 0, 0), (0, 255, 0), (0, 255, 255), (0, 0, 255)] # BGR: blue, green, yellow, red
i = 2
u, v = gt.astype(np.int32)
img[v-ll:v+ll,u-ll:u+ll,1] = 255
    cv2.circle(img,(u,v),10,color[i],2) # desired gaze center
xmin = u - int(W/4) if u > W/4 else 0
xmax = u + int(W/4) if u < 3*W/4 else int(W)
ymin = v - int(H/4) if v > H/4 else 0
ymax = v + int(H/4) if v < 3*H/4 else int(H)
cv2.rectangle(img,(xmin, ymin),(xmax, ymax), color[i], 2)
img[ymin:ymax,xmin:xmax,:] = img[ymin:ymax,xmin:xmax,:]*transparent + \
np.tile(np.array([clr for clr in color[i]])*(1-transparent),(ymax-ymin,xmax-xmin,1))
print ("gt: ({}, {})").format(u, v)
# Predicted gaze
i = 0
u, v = int(pred[0]), int(pred[1])
img[v-ll:v+ll,u-ll:u+ll,2] = 255
cv2.circle(img,(u,v),10,(255,0,0),2) # predicted gaze center
xmin = u - int(W/4) if u > W/4 else 0
xmax = u + int(W/4) if u < 3*W/4 else int(W)
ymin = v - int(H/4) if v > H/4 else 0
ymax = v + int(H/4) if v < 3*H/4 else int(H)
cv2.rectangle(img,(xmin, ymin),(xmax, ymax), color[i], 2)
img[ymin:ymax,xmin:xmax,:] = img[ymin:ymax,xmin:xmax,:]*transparent + \
np.tile(np.array([clr for clr in color[i]])*(1-transparent),(ymax-ymin,xmax-xmin,1))
print ("pred: ({}, {})").format(u, v)
img = cv2.resize(img, (800,400))
if Agent._save_img:
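        # NOTE: `save_path` is not defined anywhere in this module, so this branch would
        # raise NameError as written; it is assumed to be supplied elsewhere.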
cv2.imwrite(save_path+img_name+'.jpg', img)
else:
cv2.imshow("gaze", img)
key = cv2.waitKey(0) & 0xFF
if key == 27:
return -1
elif key == ord('q'):
return -2
elif key == ord('s'):
return -3
elif key == ord('c'):
Agent._save_img = not Agent._save_img
return 0
else:
return 0
| 35.890052 | 157 | 0.59008 | [
"MIT"
] | remega/OF | Deep360Pilot-CVPR17-tf1.2/util.py | 6,855 | Python |
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2015, Code for America
# This is open source software, released under a standard 3-clause
# BSD-style license; see the file LICENSE for details.
import os
import datetime
import re
from flask import Flask, render_template, request, abort, redirect, url_for, make_response, session, flash
from werkzeug.contrib.atom import AtomFeed
import requests
import iso8601
import pytz
import updater
import open311tools
__version__ = '1.0.2'
# Config
DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'configuration.py')
# Quick-start config. You should really put something in
# ./configuration.py or set the SRTRACKER_CONFIGURATION env var instead.
DEBUG = True
OPEN311_SERVER = 'http://localhost:5000'
OPEN311_API_KEY = ''
PASSWORD_PROTECTED = False
SECRET_KEY = 'please_please_change_this!'
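# A hypothetical ./configuration.py overriding the quick-start values above might look
# like this (names mirror settings read elsewhere in this file; values are placeholders):
#
#     DEBUG = False
#     OPEN311_SERVER = 'https://open311.example.gov'
#     OPEN311_API_KEY = 'your-api-key'
#     PASSWORD_PROTECTED = True
#     PASSWORD = 'choose-a-password'
#     SECRET_KEY = 'a-long-random-string'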
app = Flask(__name__)
@app.before_request
def password_protect():
# don't password-protect images (for e-mail!)
if app.config['PASSWORD_PROTECTED'] and not request.path.startswith('/static/img'):
auth = request.authorization
if not auth or auth.password != app.config['PASSWORD']:
# Tell the browser to do basic auth
return make_response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
#--------------------------------------------------------------------------
# ROUTES
#--------------------------------------------------------------------------
@app.route("/", defaults={'page': 1, 'service_code': ''})
@app.route("/<int:page>", defaults={'service_code': ''})
@app.route("/<int:page>/<service_code>")
def index(page, service_code):
if 'filter' in request.args:
service_code = request.args['filter']
url = '%s/requests.json' % app.config['OPEN311_SERVER']
recent_sr_timeframe = app.config.get('RECENT_SRS_TIME')
# If SRS_PAGE_SIZE is set, use paging. Otherwise, fall back to a non-paged list from MAX_RECENT_SRS
page_size = app.config.get('SRS_PAGE_SIZE')
paged = page_size > 0
if not paged:
page_size = app.config.get('MAX_RECENT_SRS', 50)
page = 1
services_list = open311tools.services(app.config['OPEN311_SERVER'], app.config['OPEN311_API_KEY'])
service_name = ''
for service in services_list:
if service_code == service['service_code']:
service_name = service['service_name']
break
if not service_name:
service_code = ''
params = {
'extensions': 'true',
'page_size': page_size,
'page': page,
'service_code': service_code
}
if recent_sr_timeframe:
start_datetime = datetime.datetime.utcnow() - datetime.timedelta(seconds=recent_sr_timeframe)
params['start_date'] = start_datetime.isoformat() + 'Z'
if app.config['OPEN311_API_KEY']:
params['api_key'] = app.config['OPEN311_API_KEY']
r = requests.get(url, params=params)
if r.status_code != 200:
app.logger.error('OPEN311: Failed to load recent requests from Open311 server. Status Code: %s, Response: %s', r.status_code, r.text)
service_requests = None
else:
# need to slice with page_size in case an endpoint doesn't support page_size its API (it's non-standard)
service_requests = r.json[:page_size]
# we might receive SRs that were updated in the future (!); pretend like those updates were just now.
# fixes https://github.com/codeforamerica/srtracker/issues/80
now = datetime.datetime.utcnow()
for sr in service_requests:
if 'updated_datetime' in sr:
# parse and ensure the date is naive for comparison to utcnow
updated = iso8601.parse_date(sr['updated_datetime']) \
.astimezone(pytz.utc).replace(tzinfo=None)
sr['updated_datetime'] = min(now, updated)
return render_app_template('index.html',
service_requests = service_requests,
page = page,
services_list = services_list,
service_code = service_code,
service_name = service_name)
@app.route("/requests/")
def request_search():
if 'request_id' in request.args:
return redirect(url_for('show_request', request_id=request.args['request_id']))
else:
abort(404)
@app.route("/requests/<request_id>", methods=["GET", "POST"])
def show_request(request_id):
request_id = request_id.lstrip('#')
# receive subscription
form_errors = []
submitted_email = None
if request.method == 'POST':
submitted_email = request.form.get('update_email')
if submitted_email:
success = subscribe_to_sr(request_id, submitted_email)
if not success:
form_errors.append('Please use a valid e-mail address.')
# TODO: Should probably use Three or something nice for this...
url = '%s/requests/%s.json' % (app.config['OPEN311_SERVER'], request_id)
params = {'extensions': 'true', 'legacy': 'false'}
if app.config['OPEN311_API_KEY']:
params['api_key'] = app.config['OPEN311_API_KEY']
r = requests.get(url, params=params)
if r.status_code == 404:
# TODO: how to generalize this?
# Chicago's SR IDs are always \d\d-\d{8}, if we get just digits, reformat and try again
request_id_digits = re.sub(r'\D', '', request_id)
if len(request_id_digits) == 8:
# Try prepending the year if it's only 8 digits
request_id_digits = datetime.date.today().strftime('%y') + request_id_digits
if len(request_id_digits) == 10:
reformatted = '%s-%s' % (request_id_digits[:2], request_id_digits[2:])
if reformatted != request_id:
return redirect(url_for('show_request', request_id=reformatted))
# It would be nice to log this for analytical purposes (what requests are being checked that we can't show?)
# but that would be better done through GA or KISS Metrics than through server logging
services = open311tools.services(app.config['OPEN311_SERVER'], app.config['OPEN311_API_KEY'])
return render_app_template('error_no_sr.html', request_id=request_id, services=services), 404
elif r.status_code != 200:
app.logger.error('OPEN311: Error (not 404) loading data for SR %s', request_id)
return render_app_template('error_311_api.html', request_id=request_id), 500
srs = r.json
if srs:
sr = fixup_sr(srs[0], request_id)
if 'requested_datetime' in sr:
sr['requested_datetime'] = iso8601.parse_date(sr['requested_datetime'])
# sometimes an SR doesn't include notes even though there should always be an "opened" note
if 'notes' not in sr:
sr['notes'] = []
relevant_notes = 0
for note in sr['notes']:
note['datetime'] = iso8601.parse_date(note['datetime'])
if note['type'] in ('follow_on', 'follow_on_created', 'activity', 'closed'):
relevant_notes += 1
# add follow-on closure data, fix types, etc, etc
by_id = {}
follow_on_open_count = 0
follow_on_close_count = 0
for note in sr['notes']:
if note['type'] in ('follow_on', 'follow_on_created', 'follow_on_closed'):
note_sr_id = note['extended_attributes']['service_request_id']
# old-style is just "follow_on" for everything related to follow-ons
# new-style is "follow_on_created" and "follow_on_closed"
# update old notes so templates don't get crazy complicated :(
if note['type'] == 'follow_on_created' or note['description'].endswith('Created'):
note['type'] = 'follow_on_created'
follow_on_open_count += 1
by_id[note_sr_id] = note
elif note['type'] == 'follow_on_closed' or note['description'].endswith('Closed'):
follow_on_close_count += 1
note['type'] = 'follow_on_closed'
if note_sr_id in by_id:
original = by_id[note_sr_id]
original['extended_attributes']['closed_datetime'] = note['datetime']
# if we hit any follow_on_opened notes
if follow_on_open_count > 0:
# remove the notes that claim the request is closed
sr['notes'] = [n for n in sr['notes'] if not n['type'] == 'closed']
# set the request to open
sr['status'] = 'open'
# if we hit as many follow_on_closed as follow_on_opened notes, then request is really closed
if follow_on_open_count == follow_on_close_count:
# set the request status to closed
sr['status'] = 'closed'
tmp_note = {}
# add a closing note
tmp_note['type'] = 'closed'
tmp_note['summary'] = 'Request Completed'
# this is brittle, but shouldn't break
tmp_datetime = sorted([n['extended_attributes']['closed_datetime'] for n in by_id.values()])
# set the closed datetime to be the datetime of the last-closed follow-on
tmp_note['datetime'] = tmp_datetime[0]
# add the extra note
sr['notes'].append(tmp_note)
# if there's no activity yet, show 'under review'
if relevant_notes == 0:
sr['notes'].append({
'type': 'activity',
'summary': 'Under review by %s staff' % sr.get('agency_responsible', '')
})
subscribed = False
if sr['status'] == 'open' and session.get('addr', None):
# TODO: when subscription service supports more than e-mail,
# we should probably be able to show all your subscriptions here
subscribed = updater.subscription_exists(request_id, 'email', session.get('addr', ''))
# test media
# sr['media_url'] = sr['media_url'] or 'http://farm5.staticflickr.com/4068/4286605571_c1a1751fdc_n.jpg'
body = render_app_template('service_request.html', sr=sr, subscribed=subscribed, errors=form_errors, submitted_email=submitted_email)
return (body, 200, None)
else:
return render_app_template('error_no_sr.html', request_id=request_id), 404
@app.route("/subscribe/<request_id>", methods=["POST"])
def subscribe(request_id):
email = request.form.get('update_email')
if email:
success = subscribe_to_sr(request_id, email)
if not success:
flash('Please use a valid e-mail address.', 'error')
return redirect(url_for('show_request', request_id=request_id))
@app.route("/unsubscribe/<subscription_key>", methods=["GET", "POST"])
def unsubscribe(subscription_key):
subscription = updater.subscription_for_key(subscription_key)
if subscription:
sr_id = subscription.sr_id
updater.unsubscribe_with_key(subscription_key)
destination = url_for('show_request', request_id=sr_id)
else:
destination = url_for('index')
    flash(u'You’ve been unsubscribed from this service request. You will no longer receive e-mails when it is updated.')
return redirect(destination)
#--------------------------------------------------------------------------
# SYNDICATION
#--------------------------------------------------------------------------
@app.route('/recent.atom')
def recent_feed():
atom_size = app.config.get('ATOM_SIZE', 25)
url = '%s/requests.json' % app.config['OPEN311_SERVER']
recent_sr_timeframe = app.config.get('RECENT_SRS_TIME')
params = {
'extensions': 'true',
'page_size': atom_size
}
if recent_sr_timeframe:
start_datetime = datetime.datetime.utcnow() - datetime.timedelta(seconds=recent_sr_timeframe)
params['start_date'] = start_datetime.isoformat() + 'Z'
if app.config['OPEN311_API_KEY']:
params['api_key'] = app.config['OPEN311_API_KEY']
r = requests.get(url, params=params)
if r.status_code != 200:
app.logger.error('OPEN311: Failed to load recent requests from Open311 server. Status Code: %s, Response: %s', r.status_code, r.text)
service_requests = None
else:
# need to slice with atom_size in case an endpoint doesn't support page_size
service_requests = r.json[:atom_size]
# generate feed
feed = AtomFeed('Recently Updated Service Requests',
feed_url=request.url, url=request.url_root)
if service_requests:
for sr in service_requests:
if 'service_request_id' in sr:
sr['requested_datetime'] = iso8601.parse_date(sr['requested_datetime'])
sr['updated_datetime'] = iso8601.parse_date(sr['updated_datetime'])
title = '%s #%s' % (sr['service_name'], sr['service_request_id'])
# in principle, this could be the result of a templating operation
body = sr.get('description','')
if body:
body += '<br /><br />'
body += sr['address']
feed.add(title,
unicode(body),
content_type='html',
author=sr['agency_responsible'],
url=url_for('show_request',
request_id=sr['service_request_id']),
updated=sr['updated_datetime'],
published=sr['requested_datetime'])
return feed.get_response()
#--------------------------------------------------------------------------
# ERRORS
#--------------------------------------------------------------------------
@app.errorhandler(404)
def page_not_found(error):
return render_app_template('error_404.html'), 404
@app.errorhandler(500)
def generic_error(error):
return render_app_template('error_generic.html'), 500
#--------------------------------------------------------------------------
# FILTERS
#--------------------------------------------------------------------------
# Friendly time by Sean Vieira (http://flask.pocoo.org/snippets/33/)
@app.template_filter()
def friendly_time(dt, past_="ago", future_="from now", default="just now"):
"""
Returns string representing "time since"
or "time until" e.g.
3 days ago, 5 hours from now etc.
"""
if dt is None:
return ''
if isinstance(dt, basestring):
dt = iso8601.parse_date(dt)
# ensure the date is naive for comparison to utcnow
if dt.tzinfo:
dt = dt.astimezone(pytz.utc).replace(tzinfo=None)
now = datetime.datetime.utcnow()
if now > dt:
diff = now - dt
dt_is_past = True
else:
diff = dt - now
dt_is_past = False
periods = (
(diff.days / 365, "year", "years"),
(diff.days / 30, "month", "months"),
(diff.days / 7, "week", "weeks"),
(diff.days, "day", "days"),
(diff.seconds / 3600, "hour", "hours"),
(diff.seconds / 60, "minute", "minutes"),
(diff.seconds, "second", "seconds"),
)
for period, singular, plural in periods:
if period:
return "%d %s %s" % (period,
singular if period == 1 else plural,
past_ if dt_is_past else future_)
return default
state_pattern = re.compile(r'\b(\w\w)(,?\s*\d{5}(?:-\d{4})?)?$')
@app.template_filter()
def title_address(address):
'''Slightly improved title() method for address strings
Makes sure state abbreviations are upper-case.'''
titled = address.title()
titled = state_pattern.sub(lambda match: match.group(1).upper() + (match.group(2) or ''), titled)
return titled
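# Illustrative example (assumed input; the result follows from title() plus the state
# pattern above):
#
#     title_address('123 main st, chicago, il 60601')
#     # -> '123 Main St, Chicago, IL 60601'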
#--------------------------------------------------------------------------
# UTILITIES
#--------------------------------------------------------------------------
def render_app_template(template, **kwargs):
'''Add some goodies to all templates.'''
if 'config' not in kwargs:
kwargs['config'] = app.config
if '__version__' not in kwargs:
kwargs['__version__'] = __version__
return render_template(template, **kwargs)
def fixup_sr(sr, request_id=None):
'''
Fix up an SR to try and ensure some basic info.
(In Chicago's API, any field can be missing, even if it's required.)
'''
remove_blacklisted_fields(sr)
if 'service_request_id' not in sr:
sr['service_request_id'] = request_id or sr.get('token', 'UNKNOWN')
if 'status' not in sr:
sr['status'] = 'open'
if 'service_name' not in sr:
sr['service_name'] = 'Miscellaneous Services'
return sr
def remove_blacklisted_fields(sr):
blacklist = app.config.get('SR_FIELD_BLACKLIST')
if blacklist:
for field in blacklist:
if field in sr:
del sr[field]
def subscribe_to_sr(request_id, email):
# validate e-mail
match = re.match(r'[A-Z0-9._%+\-]+@[A-Z0-9.\-]+\.[A-Z]{2,4}$', email, re.IGNORECASE)
if match:
key = updater.subscribe(request_id, 'email', email)
if key:
# TODO: should we use the subscription key instead?
session['addr'] = email
session.permanent = True
return True
else:
app.logger.error('Error creating a subscription for %s on %s', email, request_id)
return False
#--------------------------------------------------------------------------
# INIT
#--------------------------------------------------------------------------
if __name__ == "__main__":
app.config.from_object(__name__)
# we want to support a nice fallback, so use from_pyfile directly instead of from_envvar
config_path = os.path.abspath(os.environ.get('SRTRACKER_CONFIGURATION', DEFAULT_CONFIG_PATH))
if os.path.isfile(config_path):
app.config.from_pyfile(config_path)
else:
app.logger.warn('''YOU ARE USING THE QUICK-START CONFIG, WHICH IS NOT RECOMMENDED.
PUT SOMETHING IN "./configuration.py" OR SET THE "SRTRACKER_CONFIGURATION" ENV VAR INSTEAD.''')
port = int(os.environ.get('PORT', 5100))
app.run(host='0.0.0.0', port=port)
| 38.440748 | 141 | 0.592753 | [
"BSD-3-Clause"
] | codeforamerica/srtracker | app.py | 18,492 | Python |
# Copyright 2015 moco_beta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
from janome.sysdic import all_fstdata, entries, mmap_entries, connections, chardef, unknowns
from janome.dic import (
SystemDictionary,
MMapSystemDictionary,
UserDictionary,
CompiledUserDictionary,
FILE_USER_FST_DATA,
FILE_USER_ENTRIES_DATA
)
from janome.progress import SimpleProgressIndicator, logger as p_logger
# TODO: better way to find package...
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
class TestDictionary(unittest.TestCase):
def test_system_dictionary_ipadic(self):
sys_dic = SystemDictionary(all_fstdata(), entries(), connections, chardef.DATA, unknowns.DATA)
self.assertEqual(7, len(sys_dic.lookup('形態素'.encode('utf-8'))))
self.assertEqual(1, sys_dic.get_trans_cost(0, 1))
self.assertEqual({'HIRAGANA': []}, sys_dic.get_char_categories('は'))
self.assertEqual({'KATAKANA': []}, sys_dic.get_char_categories('ハ'))
self.assertEqual({'KATAKANA': []}, sys_dic.get_char_categories('ハ'))
self.assertEqual({'KANJI': []}, sys_dic.get_char_categories('葉'))
self.assertEqual({'ALPHA': []}, sys_dic.get_char_categories('C'))
self.assertEqual({'ALPHA': []}, sys_dic.get_char_categories('C'))
self.assertEqual({'SYMBOL': []}, sys_dic.get_char_categories('#'))
self.assertEqual({'SYMBOL': []}, sys_dic.get_char_categories('#'))
self.assertEqual({'NUMERIC': []}, sys_dic.get_char_categories('5'))
self.assertEqual({'NUMERIC': []}, sys_dic.get_char_categories('5'))
self.assertEqual({'KANJI': [], 'KANJINUMERIC': ['KANJI']}, sys_dic.get_char_categories('五'))
self.assertEqual({'GREEK': []}, sys_dic.get_char_categories('Γ'))
self.assertEqual({'CYRILLIC': []}, sys_dic.get_char_categories('Б'))
self.assertEqual({'DEFAULT': []}, sys_dic.get_char_categories('𠮷'))
self.assertEqual({'DEFAULT': []}, sys_dic.get_char_categories('한'))
self.assertTrue(sys_dic.unknown_invoked_always('ALPHA'))
self.assertFalse(sys_dic.unknown_invoked_always('KANJI'))
self.assertTrue(sys_dic.unknown_grouping('NUMERIC'))
self.assertFalse(sys_dic.unknown_grouping('KANJI'))
self.assertEqual(2, sys_dic.unknown_length('HIRAGANA'))
def test_property_types(self):
sys_dic = SystemDictionary(all_fstdata(), entries(), connections, chardef.DATA, unknowns.DATA)
# entry in the system dictionary
entry = sys_dic.lookup('すもも'.encode('utf8'))[0]
self.assertTrue(type(entry[1]) is str)
self.assertTrue(type(entry[0]) is int)
self.assertTrue(type(entry[2]) is int)
self.assertTrue(type(entry[3]) is int)
self.assertTrue(type(entry[4]) is int)
entry_extra = sys_dic.lookup_extra(entry[0])
self.assertTrue(type(entry_extra[0]) is str)
self.assertTrue(type(entry_extra[1]) is str)
self.assertTrue(type(entry_extra[2]) is str)
self.assertTrue(type(entry_extra[3]) is str)
self.assertTrue(type(entry_extra[4]) is str)
self.assertTrue(type(entry_extra[5]) is str)
# unknown entry
entry = sys_dic.unknowns.get(u'HIRAGANA')[0]
self.assertTrue(type(entry[3]) is str)
self.assertTrue(type(entry[0]) is int)
self.assertTrue(type(entry[1]) is int)
self.assertTrue(type(entry[2]) is int)
        # mmap dict entry
mmap_dic = MMapSystemDictionary(all_fstdata(), mmap_entries(), connections, chardef.DATA, unknowns.DATA)
entry = mmap_dic.lookup(u'すもも'.encode('utf8'))[0]
self.assertTrue(type(entry[1]) is str)
self.assertTrue(type(entry[0]) is int)
self.assertTrue(type(entry[2]) is int)
self.assertTrue(type(entry[3]) is int)
self.assertTrue(type(entry[4]) is int)
entry_extra = mmap_dic.lookup_extra(entry[0])
self.assertTrue(type(entry_extra[0]) is str)
self.assertTrue(type(entry_extra[1]) is str)
self.assertTrue(type(entry_extra[2]) is str)
self.assertTrue(type(entry_extra[3]) is str)
self.assertTrue(type(entry_extra[4]) is str)
self.assertTrue(type(entry_extra[5]) is str)
# entry in the user defined dictionary
user_dic = UserDictionary(user_dict=os.path.join(parent_dir, 'tests/user_ipadic.csv'),
enc='utf8', type='ipadic', connections=connections)
entry = user_dic.lookup('東京スカイツリー'.encode('utf8'))[0]
self.assertTrue(type(entry[1]) is str)
self.assertTrue(type(entry[0]) is int)
self.assertTrue(type(entry[2]) is int)
self.assertTrue(type(entry[3]) is int)
self.assertTrue(type(entry[4]) is int)
def test_system_dictionary_cache(self):
sys_dic = SystemDictionary(all_fstdata(), entries(), connections, chardef.DATA, unknowns.DATA)
self.assertEqual(11, len(sys_dic.lookup('小書き'.encode('utf8'))))
self.assertEqual(11, len(sys_dic.lookup('小書き'.encode('utf8'))))
self.assertEqual(11, len(sys_dic.lookup('小書きにしました'.encode('utf8'))))
self.assertEqual(10, len(sys_dic.lookup('みんなと'.encode('utf8'))))
self.assertEqual(10, len(sys_dic.lookup('みんなと'.encode('utf8'))))
self.assertEqual(2, len(sys_dic.lookup('叩く'.encode('utf8'))))
self.assertEqual(2, len(sys_dic.lookup('叩く'.encode('utf8'))))
def test_user_dictionary(self):
# create user dictionary from csv
user_dic = UserDictionary(user_dict=os.path.join(parent_dir, 'tests/user_ipadic.csv'),
enc='utf8', type='ipadic', connections=connections)
self.assertEqual(1, len(user_dic.lookup('東京スカイツリー'.encode('utf8'))))
# save compiled dictionary
dic_dir = os.path.join(parent_dir, 'tests/userdic')
user_dic.save(to_dir=os.path.join(parent_dir, 'tests/userdic'))
self.assertTrue(os.path.exists(os.path.join(dic_dir, FILE_USER_FST_DATA)))
self.assertTrue(os.path.exists(os.path.join(dic_dir, FILE_USER_ENTRIES_DATA)))
# load compiled dictionary
compiled_user_dic = CompiledUserDictionary(dic_dir, connections=connections)
self.assertEqual(1, len(compiled_user_dic.lookup('とうきょうスカイツリー駅'.encode('utf8'))))
def test_user_dictionary_with_progress(self):
# create user dictionary from csv with progress indicator
progress_indicator = SimpleProgressIndicator(update_frequency=1.0)
with self.assertLogs(logger=p_logger) as cm:
# create user dictionary
large_user_dic = UserDictionary(
user_dict=os.path.join(parent_dir, 'tests/user_ipadic.csv'),
enc='utf8', type='ipadic', connections=connections,
progress_handler=progress_indicator)
entry_count = len(large_user_dic.entries)
# output for each entry and for complete (entry_count + 1)
self.assertEqual((entry_count + 1) * 2, len(cm.output))
# reset after complete
self.assertIsNone(progress_indicator.value)
for i in range(0, (entry_count + 1) * 2):
if i < entry_count:
# progress for reading csv
self.assertIn('Reading user dictionary from CSV', cm.output[i])
self.assertIn(f'{i + 1}/{entry_count}', cm.output[i])
elif i == entry_count:
# on compete loading csv
self.assertIn(f'{entry_count}/{entry_count}', cm.output[i])
elif i < entry_count * 2 + 1:
# progress for create_minimum_transducer
self.assertIn('Running create_minimum_transducer', cm.output[i])
self.assertIn(f'{i - entry_count}/{entry_count}', cm.output[i])
elif i == entry_count * 2 + 1:
# on compete loading create_minimum_transducer
self.assertIn(f'{entry_count}/{entry_count}', cm.output[i])
# same result as without progress indicator
self.assertEqual(1, len(large_user_dic.lookup('東京スカイツリー'.encode('utf8'))))
def test_simplified_user_dictionary(self):
# create user dictionary from csv
user_dic = UserDictionary(user_dict=os.path.join(parent_dir, 'tests/user_simpledic.csv'),
enc='utf8', type='simpledic', connections=connections)
self.assertEqual(1, len(user_dic.lookup('東京スカイツリー'.encode('utf8'))))
# save compiled dictionary
dic_dir = os.path.join(parent_dir, 'tests/userdic_simple')
user_dic.save(to_dir=os.path.join(parent_dir, 'tests/userdic_simple'))
self.assertTrue(os.path.exists(os.path.join(dic_dir, FILE_USER_FST_DATA)))
self.assertTrue(os.path.exists(os.path.join(dic_dir, FILE_USER_ENTRIES_DATA)))
# load compiled dictionary
compiled_user_dic = CompiledUserDictionary(dic_dir, connections=connections)
self.assertEqual(1, len(compiled_user_dic.lookup('とうきょうスカイツリー駅'.encode('utf8'))))
def test_simplified_user_dictionary_with_progress(self):
# create simplified user dictionary from csv with progress indicator
progress_indicator = SimpleProgressIndicator(update_frequency=1.0)
with self.assertLogs(logger=p_logger) as cm:
# create user dictionary
large_user_dic = UserDictionary(
user_dict=os.path.join(parent_dir, 'tests/user_simpledic.csv'),
enc='utf8', type='simpledic', connections=connections,
progress_handler=progress_indicator)
entry_count = len(large_user_dic.entries)
# output for each entry and for complete (entry_count + 1)
self.assertEqual((entry_count + 1) * 2, len(cm.output))
# value is reset after complete
self.assertIsNone(progress_indicator.value)
for i in range(0, (entry_count + 1) * 2):
if i < entry_count:
# progress for reading csv
self.assertIn('Reading user dictionary from CSV', cm.output[i])
self.assertIn(f'{i + 1}/{entry_count}', cm.output[i])
elif i == entry_count:
# on compete loading csv
self.assertIn(f'{entry_count}/{entry_count}', cm.output[i])
elif i < entry_count * 2 + 1:
# progress for create_minimum_transducer
self.assertIn('Running create_minimum_transducer', cm.output[i])
self.assertIn(f'{i - entry_count}/{entry_count}', cm.output[i])
elif i == entry_count * 2 + 1:
# on compete loading create_minimum_transducer
self.assertIn(f'{entry_count}/{entry_count}', cm.output[i])
# same result as without progress indicator
self.assertEqual(1, len(large_user_dic.lookup('東京スカイツリー'.encode('utf8'))))
if __name__ == '__main__':
unittest.main()
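# Hypothetical usage sketch beyond these tests: a CSV or compiled user dictionary is
# normally consumed through janome's Tokenizer, roughly like
#
#     from janome.tokenizer import Tokenizer
#     t = Tokenizer(udic='tests/user_simpledic.csv', udic_type='simpledic', udic_enc='utf8')
#     tokens = list(t.tokenize('とうきょうスカイツリー駅はすぐそこ'))
#
# The keyword argument names follow janome's documented Tokenizer options; treat them as
# assumptions if your janome version differs.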
| 50.759825 | 112 | 0.649088 | [
"Apache-2.0"
] | mocobeta/janome | tests/test_dic.py | 11,845 | Python |
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import smtplib
import logging
logger = logging.getLogger("crawler")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('sc_appointment_check.log')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(fh)
def gmail_login():
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(sys.argv[3], sys.argv[4])
return server
def verify_gmail():
try:
server = gmail_login()
server.close()
except StandardError as e:
print (e)
logger.error(e)
exit()
def notify_user(month):
FROM = sys.argv[3]
TO = sys.argv[3]
SUBJECT = "[SC Application] Vacancy found in %s" % month
TEXT = "Go to below address to catch the slot: https://eappointment.ica.gov.sg/ibook/index.do"
message = """
From: %(FROM)s
To: %(TO)s
Subject: %(SUBJECT)s
    %(TEXT)s
""" % locals()
try:
server = gmail_login()
server.sendmail(FROM, TO, message)
server.close()
except StandardError as r:
print "failed to send mail %s" % r
logger.info("failed to send mail %s" % r)
exit()
def go_to_query_page(driver):
driver.get("https://eappointment.ica.gov.sg/ibook/index.do")
driver.switch_to_frame(driver.find_element_by_name("bottomFrame"));
driver.switch_to_frame(driver.find_element_by_name("mainFrame"));
driver.find_element_by_name("apptDetails.apptType").send_keys("Singapore Citizen Application")
driver.find_element_by_name("apptDetails.identifier1").send_keys(sys.argv[1])
driver.find_element_by_name("apptDetails.identifier2").send_keys(sys.argv[2])
driver.find_element_by_name("Submit").send_keys(Keys.ENTER)
def contains_released_dates(driver):
days = driver.find_elements_by_css_selector("td[class^='cal_']")
return any(day.get_attribute("class") in ("cal_AF", "cal_AD") for day in days)
def get_month(driver):
year = int(driver.find_element_by_name("calendar.calendarYearStr").get_attribute("value"))
month = int(driver.find_element_by_name("calendar.calendarMonthStr").get_attribute("value")) + 1
return "%d%.2d" % (year, month)
def check_free_slots(driver):
days = driver.find_elements_by_css_selector("td[class^='cal_']")
current_month = get_month(driver)
if any("cal_AD" in day.get_attribute("class") for day in days):
logger.info("Slots found in %s" % current_month)
notify_user(current_month)
def check():
driver = webdriver.Chrome()
go_to_query_page(driver)
while contains_released_dates(driver):
check_free_slots(driver)
driver.execute_script("doNextMth(document.forms[0]);")
logger.info("Checked until %s, no available slots found." % get_month(driver))
driver.close()
if __name__ == "__main__":
if len(sys.argv) < 5:
print("Please refer to the readme file for proper usage.")
else:
verify_gmail()
        retry_interval = int(sys.argv[5]) if len(sys.argv) > 5 else 60
while True:
check()
time.sleep(retry_interval)
| 31.114286 | 100 | 0.669421 | [
"Apache-2.0"
] | jzhang-cloud/e-appointment-checker | sc_appointment_check.py | 3,267 | Python |
# -*- coding: utf-8 -*-
"""Top-level package for PrecisionMapper."""
import requests
from requests import ConnectionError
from datetime import datetime
from bs4 import BeautifulSoup
__author__ = """Thibault Ducret"""
__email__ = 'hello@tducret.com'
__version__ = '0.0.2'
_DEFAULT_BEAUTIFULSOUP_PARSER = "html.parser"
_SIGNIN_URL = "https://www.precisionmapper.com/users/sign_in"
_SURVEYS_URL = "https://www.precisionmapper.com/surveys"
_SHARED_SURVEYS_URL = "https://www.precisionmapper.com/shared_surveys"
_DEFAULT_DATE = "2000-01-01T00:00:00.000Z"
_RFC3339_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
_SHORT_DATE_FORMAT = '%d/%m/%Y %H:%M'
_AUTHENTICITY_TOKEN_SELECTOR = 'meta["name"="csrf-token"]'
_SURVEYS_SELECTOR = "#surveysList .tableCellWrap"
_SURVEY_NAME_SELECTOR = "div.surveyName > a['href']"
_SURVEY_LOCATION_SELECTOR = "div.cellWrap.locationWrapper > span"
_SURVEY_DATE_SELECTOR = "div.cellWrap.surveyDateRow .date"
_SURVEY_IMG_NB_AND_SIZE_SELECTOR = "div.surveyotherDetails > span"
_SURVEY_SENSOR_SELECTOR = ".surveySensorWrap"
_SURVEY_URL_SELECTOR = "div.surveyName > a['href']"
class Client(object):
""" Do the requests with the servers """
def __init__(self):
self.session = requests.session()
self.headers = {
'authority': 'www.precisionmapper.com',
'origin': 'https://www.precisionmapper.com',
'user-Agent': 'Mozilla/5.0 (Macintosh; \
Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/67.0.3396.99 Safari/537.36',
'referer': 'https://www.precisionmapper.com\
/users/sign_in',
}
def _get(self, url, expected_status_code=200):
ret = self.session.get(url=url, headers=self.headers)
if (ret.status_code != expected_status_code):
raise ConnectionError(
'Status code {status} for url {url}\n{content}'.format(
status=ret.status_code, url=url, content=ret.text))
return ret
def _post(self, url, post_data, expected_status_code=200,
allow_redirects=True):
ret = self.session.post(url=url,
headers=self.headers,
data=post_data,
allow_redirects=allow_redirects)
if (ret.status_code != expected_status_code):
raise ConnectionError(
'Status code {status} for url {url}\n{content}'.format(
status=ret.status_code, url=url, content=ret.text))
return ret
class Survey(object):
""" Class for a drone survey (mission) """
def __init__(
self, id, name, url, date, sensor="", location="",
image_nb=0, size="0 MB", thumbnail="", altitude_in_m=0,
resolution_in_cm=0, area_in_ha=0, drone_platform=""):
if type(id) != int:
raise TypeError("id must be an int, not a "+str(type(id)))
self.id = id
if type(image_nb) != int:
raise TypeError("image_nb must be an int, not a " +
str(type(image_nb)))
self.image_nb = image_nb
self.date = date
try:
self.date_obj = _rfc_date_str_to_datetime_object(self.date)
except:
raise TypeError("date must respect the format \
YYYY-MM-DDTHH:MM:SS.sssZ, received : "+date)
self.name = name
self.drone_platform = drone_platform
self.sensor = sensor
self.location = location
self.date_str = _datetime_object_to_short_date_str(self.date_obj)
self.size = size
self.thumbnail = thumbnail
self.altitude_in_m = altitude_in_m
self.resolution_in_cm = resolution_in_cm
self.area_in_ha = area_in_ha
def __str__(self):
return('[{name}] ({location} - {date}) : {image_nb} images, \
{size}, sensor : {sensor}, id : {id}'.format(
name=self.name,
location=self.location,
date=self.date_str,
image_nb=self.image_nb,
size=self.size,
sensor=self.sensor,
id=self.id))
def __repr__(self):
return("Survey(id={}, name={})".format(self.id, self.name))
class PrecisionMapper(object):
""" Class for the communications with precisionmapper.com """
def __init__(self, login, password):
self.login = login
self.password = password
self.client = Client()
def __str__(self):
return(repr(self))
def __repr__(self):
return("PrecisionMapper(login={})".format(self.login))
def get_authenticity_token(self, url=_SIGNIN_URL):
""" Returns an authenticity_token, mandatory for signing in """
res = self.client._get(url=url, expected_status_code=200)
soup = BeautifulSoup(res.text, _DEFAULT_BEAUTIFULSOUP_PARSER)
selection = soup.select(_AUTHENTICITY_TOKEN_SELECTOR)
try:
authenticity_token = selection[0].get("content")
except:
raise ValueError(
"authenticity_token not found in {} with {}\n{}".format(
_SIGNIN_URL, _AUTHENTICITY_TOKEN_SELECTOR, res.text))
return authenticity_token
def sign_in(self):
authenticity_token = self.get_authenticity_token()
post_data = {"utf8": "✓",
"authenticity_token": authenticity_token,
"return": "",
"login[username]": self.login,
"login[password]": self.password,
"commit": "Log In"}
res = self.client._post(url=_SIGNIN_URL, post_data=post_data,
expected_status_code=302,
allow_redirects=False)
# "allow_redirects = False" because we don't want to load the
# <survey> page right now => better performance
return res
def get_surveys(self, url=_SURVEYS_URL):
""" Function to get the surveys for the account """
res = self.client._get(url=url, expected_status_code=200)
soup = BeautifulSoup(res.text, _DEFAULT_BEAUTIFULSOUP_PARSER)
surveys_soup = soup.select(_SURVEYS_SELECTOR)
survey_list = []
for survey_soup in surveys_soup:
survey_name = _css_select(survey_soup, _SURVEY_NAME_SELECTOR)
try:
url = survey_soup.select(_SURVEY_URL_SELECTOR)[0]["href"]
except:
raise ValueError("Cannot get URL for the survey \
with css selector {}".format(_SURVEY_URL_SELECTOR))
try:
id = int(url.split("survey_id=")[1].split("&")[0])
except:
raise ValueError("Cannot extract id from URL {}".format(
url))
survey_location = _css_select(survey_soup,
_SURVEY_LOCATION_SELECTOR)
try:
survey_epoch = int(survey_soup.select(
_SURVEY_DATE_SELECTOR)[0]["epoch"])
survey_date_obj = datetime.fromtimestamp(survey_epoch)
survey_date = _datetime_object_to_rfc_date_str(survey_date_obj)
except:
raise ValueError("Cannot get date for the survey \
with css selector {}".format(_SURVEY_DATE_SELECTOR))
survey_img_nb_and_size = survey_soup.select(
_SURVEY_IMG_NB_AND_SIZE_SELECTOR)
try:
survey_img_nb = survey_img_nb_and_size[0].text
survey_img_nb = int(survey_img_nb.split(" ")[0])
except:
raise ValueError("Cannot get or convert image number, \
survey_img_nb_and_size = {}".format(survey_img_nb_and_size))
try:
survey_size = survey_img_nb_and_size[1].text
except:
raise ValueError("Cannot get survey size, \
survey_img_nb_and_size = {}".format(survey_img_nb_and_size))
sensor = _css_select(survey_soup, _SURVEY_SENSOR_SELECTOR)
survey = Survey(
id=id, name=survey_name, url=url,
date=survey_date, location=survey_location,
image_nb=survey_img_nb, size=survey_size, sensor=sensor)
survey_list.append(survey)
return survey_list
def get_shared_surveys(self, url=_SHARED_SURVEYS_URL):
return self.get_surveys(url=url)
def _css_select(soup, css_selector):
""" Returns the content of the element pointed by the CSS selector,
or an empty string if not found """
selection = soup.select(css_selector)
if len(selection) > 0:
if hasattr(selection[0], 'text'):
retour = selection[0].text.strip()
else:
retour = ""
else:
retour = ""
return retour
def _datetime_object_to_rfc_date_str(datetime_obj):
""" Returns a date string to the RFC 3339 standard """
return datetime_obj.strftime(_RFC3339_DATE_FORMAT)
def _rfc_date_str_to_datetime_object(rfc_date_str):
""" Returns a date string to the RFC 3339 standard """
return datetime.strptime(rfc_date_str, _RFC3339_DATE_FORMAT)
def _datetime_object_to_short_date_str(datetime_obj):
""" Returns a short date string """
return datetime_obj.strftime(_SHORT_DATE_FORMAT)
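# Illustrative usage sketch, not part of the original module: the credentials
# below are placeholders and a working network connection is required.
if __name__ == '__main__':
    _pm = PrecisionMapper("user@example.com", "my_password")
    _pm.sign_in()
    for _survey in _pm.get_surveys():
        print(_survey)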
| 38.178862 | 79 | 0.610733 | [
"MIT"
] | tducret/precisionmapper-python | precisionmapper/__init__.py | 9,394 | Python |
from __future__ import absolute_import, unicode_literals
import sys
from subprocess import CalledProcessError
import pytest
from virtualenv.info import PY2
from virtualenv.seed.wheels.acquire import download_wheel, pip_wheel_env_run
from virtualenv.seed.wheels.embed import BUNDLE_FOLDER, get_embed_wheel
from virtualenv.seed.wheels.util import discover_wheels
def test_pip_wheel_env_run_could_not_find(session_app_data, mocker):
mocker.patch("virtualenv.seed.wheels.acquire.from_bundle", return_value=None)
with pytest.raises(RuntimeError, match="could not find the embedded pip"):
pip_wheel_env_run([], session_app_data)
def test_download_wheel_bad_output(mocker, for_py_version, session_app_data):
"""if the download contains no match for what wheel was downloaded, pick one that matches from target"""
distribution = "setuptools"
p_open = mocker.MagicMock()
mocker.patch("virtualenv.seed.wheels.acquire.Popen", return_value=p_open)
p_open.communicate.return_value = "", ""
p_open.returncode = 0
embed = get_embed_wheel(distribution, for_py_version)
as_path = mocker.MagicMock()
available = discover_wheels(BUNDLE_FOLDER, "setuptools", None, for_py_version)
as_path.iterdir.return_value = [i.path for i in available]
result = download_wheel(distribution, "=={}".format(embed.version), for_py_version, [], session_app_data, as_path)
assert result.path == embed.path
def test_download_fails(mocker, for_py_version, session_app_data):
p_open = mocker.MagicMock()
mocker.patch("virtualenv.seed.wheels.acquire.Popen", return_value=p_open)
p_open.communicate.return_value = "out", "err"
p_open.returncode = 1
as_path = mocker.MagicMock()
with pytest.raises(CalledProcessError) as context:
download_wheel("pip", "==1", for_py_version, [], session_app_data, as_path),
exc = context.value
if PY2:
assert exc.output == "outerr"
else:
assert exc.output == "out"
assert exc.stderr == "err"
assert exc.returncode == 1
assert [
sys.executable,
"-m",
"pip",
"download",
"--progress-bar",
"off",
"--disable-pip-version-check",
"--only-binary=:all:",
"--no-deps",
"--python-version",
for_py_version,
"-d",
str(as_path),
"pip==1",
] == exc.cmd
| 34.73913 | 118 | 0.698373 | [
"MIT"
] | MShaffar19/virtualenv | tests/unit/seed/wheels/test_acquire.py | 2,397 | Python |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2012-2015, 2017
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2012-2013
# - Thomas Beermann, <thomas.beermann@cern.ch>, 2014
# - Hannes Hansen, <hannes.jakob.hansen@cern.ch>, 2019
# - Ruturaj Gujar <ruturaj.gujar23@gmail.com>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
"""
Creates a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml)
:param email: The Email address associated with the identity.
:param password: If type==userpass, this sets the password.
:param session: The database session in use.
"""
if type == IdentityType.USERPASS and password is None:
raise exception.IdentityError('You must provide a password!')
new_id = models.Identity()
new_id.update({'identity': identity, 'identity_type': type, 'email': email})
if type == IdentityType.USERPASS and password is not None:
salt = os.urandom(255) # make sure the salt has the length of the hash
if six.PY3:
decoded_salt = b64encode(salt).decode()
salted_password = ('%s%s' % (decoded_salt, password)).encode()
else:
salted_password = '%s%s' % (salt, str(password))
password = hashlib.sha256(salted_password).hexdigest() # hash it
new_id.update({'salt': salt, 'password': password, 'email': email})
try:
new_id.save(session=session)
except IntegrityError as e:
if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
"""
Deletes a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
"""
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
id.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
"""
Adds a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml).
:param account: The account name.
:param email: The Email address associated with the identity.
:param default: If True, the account should be used by default with the provided identity.
:param password: Password if type is userpass.
:param session: The database session in use.
"""
if not account_exists(account, session=session):
raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
add_identity(identity=identity, type=type, email=email, password=password, session=session)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type, account=account)
try:
iaa.save(session=session)
except IntegrityError:
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
"""
Retrieves the default account mapped to an identity.
:param identity: The identity key name. For example, x509DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session to use.
:returns: The default account name, None otherwise.
"""
tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
identity_type=type,
is_default=True).first()
if tmp is None:
raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
return tmp.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
"""
Removes a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param account: The account name.
:param session: The database session in use.
"""
aid = session.query(models.IdentityAccountAssociation).filter_by(identity=identity, identity_type=type, account=account).first()
if aid is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
aid.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
"""
Returns a list of all identities.
:param session: The database session in use.
returns: A list of all identities.
"""
id_list = []
for id in session.query(models.Identity).order_by(models.Identity.identity):
id_list.append((id.identity, id.identity_type))
return id_list
@read_session
def list_accounts_for_identity(identity, type, session=None):
"""
Returns a list of all accounts for an identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
returns: A list of all accounts for the identity.
"""
account_list = []
for account, in session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity, identity_type=type):
account_list.append(account)
return account_list
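# Illustrative sketch, not part of the original module: how the helpers above
# are typically combined. The identity, e-mail and account values are made up,
# the 'root' account is assumed to exist, and a configured database session is
# provided implicitly by the decorators.
def _example_register_userpass_identity():
    add_identity('jdoe', IdentityType.USERPASS, email='jdoe@example.com', password='secret')
    add_account_identity('jdoe', IdentityType.USERPASS, account='root', email='jdoe@example.com')
    return list_accounts_for_identity('jdoe', IdentityType.USERPASS)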
| 37.849462 | 132 | 0.69446 | [
"Apache-2.0"
] | Pranay144/rucio | lib/rucio/core/identity.py | 7,040 | Python |
import pygame
import os
from pygame.locals import *
import config
import game
import engine
import menu
from random import randint
import _fighter
from pygame_functions import *
class Scenario:
def __init__(self, game, scenario):
self.game = game
self.scenario = scenario
pygame.mixer.music.stop()
music = engine.Music("mkt")
music.play()
music.volume(0.5)
def setScenario(self, scenario):
if scenario == 9:
scenario = randint(1, 8)
#self.scene = pygame.image.load('../res/Background/Scenario'+str(scenario)+'.png')
#self.game.getDisplay().blit(self.scene, (0, 0))
#pygame.display.update()
#screenSize(800, 500,"pyKombat",None,None,True) # FullScreen
screenSize(800, 500,"pyKombat") # Minimized
setBackgroundImage('../res/Background/Scenario'+str(scenario)+'.png')
self.judge(scenario)
def judge(self,scenario):
[player1,player2] = self.addFigther(scenario)
player1.act()
player2.act()
nextFrame1 = clock()
nextFrame2 = clock()
hitCounter = 0
while True:
aux1 = player1.fight(clock(),nextFrame1)
nextFrame1 = aux1
aux2 = player2.fight(clock(),nextFrame2)
nextFrame2 = aux2
x1 = player1.getX()
x2 = player2.getX()
#print(x1, x2, x2-x1)
            # in case they touch the edge of the screen
if player1.getX() < 20:
player1.setX(20)
if player2.getX() < 20:
player2.setX(20)
if player1.getX() > (800-20):
player1.setX(800-20)
if player2.getX() > (800-20):
player2.setX(800-20)
if(collide(player1.currentSprite(),player2.currentSprite())):
                # in case they merely touch each other
if ( (player1.isWalking() or player1.isJumping()) and (player2.isDancing() or player2.isCrouching() or player2.isWalking()) ) or ((player2.isWalking() or player2.isJumping()) and (player1.isDancing() or player1.isCrouching() or player2.isWalking()) ) or (player1.isWalking() and player2.isWalking()) or (player1.isJumping() and player2.isJumping()) or (player1.isDancing() and player2.isDancing()) or (player2.isSpecialMove() and player1.ishitSpecial()):
player1.setX(x1-15)
if not player2.isSpecialMove() :player2.setX(x2+15)
else: player1.setX(x1-25)
                # in case there was a weak punch:
if ( player1.isApunching() and (player2.isWalking() or player2.isDancing() or player2.isApunching() or player2.ishitSpecial()) ) or ( player2.isApunching() and (player1.isWalking() or player1.isDancing() or player1.isApunching()) ):
if player1.isApunching():
player2.takeHit("Apunching")
if player2.isApunching():
player1.takeHit("Apunching")
print("socofraco")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a strong punch:
if ( player1.isBpunching() and (player2.isWalking() or player2.isDancing() or player2.isBpunching()) ) or ( player2.isBpunching() and (player1.isWalking() or player1.isDancing() or player1.isBpunching()) ):
if player1.isBpunching():
player2.takeHit("Bpunching")
if player2.isBpunching():
player1.takeHit("Bpunching")
print("socoforte")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a weak kick:
if ( player1.isAkicking() and (player2.isWalking() or player2.isDancing() or player2.isAkicking() or player2.isCrouching()) and not player2.isBblocking() ) or ( player2.isAkicking() and (player1.isWalking() or player1.isDancing() or player1.isAkicking() or player1.isCrouching() and not player1.isBblocking()) ):
if player1.isAkicking():
player2.takeHit("Akicking")
if player2.isAkicking():
player1.takeHit("Akicking")
print("chutefraco")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a strong kick:
if ( player1.isBkicking() and (player2.isWalking() or player2.isDancing() or player2.isBkicking()) ) or ( player2.isBkicking() and (player1.isWalking() or player1.isDancing() or player1.isBkicking()) ):
if player1.isBkicking():
player2.takeHit("Bkicking")
if player2.isBkicking():
player1.takeHit("Bkicking")
print("chuteforte")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a standing block:
if ( (player1.isApunching() or player1.isBpunching() or player1.isDpunching() or player1.isAkicking() or player1.isBkicking() ) and player2.isAblocking() ) or ( (player2.isApunching() or player2.isBpunching() or player1.isDpunching() or player2.isAkicking() or player2.isBkicking() ) and player1.isAblocking() ):
if player1.isAblocking():
player1.takeHit("Ablocking")
if player2.isAblocking():
player2.takeHit("Ablocking")
engine.Sound("block").play()
player1.setX(x1-12)
player2.setX(x2+12)
print("ablock")
                # in case there was a weak crouching punch or kick against someone standing:
                if ( ((player1.isCpunching() or player1.isCkicking() ) and not player2.isCrouching() and not player2.isBblocking() ) or ((player2.isCpunching() or player2.isCkicking() ) and not player1.isCrouching() and not player1.isBblocking() ) ): # still need to add the Bblock
if player1.isCpunching() or player1.isCkicking():
player2.takeHit("Cpunching")
if player2.isCpunching() or player2.isCkicking():
player1.takeHit("Cpunching")
print("socofraco!!!!!!!")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a strong crouching punch against someone standing:
if ( (player1.isDpunching() and (not player2.isAblocking() and not player2.isBblocking()) ) or player2.isDpunching() and (not player1.isAblocking() and not player1.isBblocking()) ):
if player1.isDpunching():
player2.takeHit("Bkicking")
if player2.isDpunching():
player1.takeHit("Bkicking")
print("socofraco$#$")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a strong crouching kick against someone standing:
if ( player1.isDkicking() or player2.isDkicking() ):
if player1.isDkicking():
player2.takeHit("Dkicking")
if player2.isDkicking():
player1.takeHit("Dkicking")
print("socofraco")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a weak crouching punch or kick against someone crouching:
if ( ( (player1.isCpunching() or player1.isCkicking()) and player2.isCrouching() and not player2.isBblocking() ) or ( (player2.isCpunching() or player2.isCkicking()) and player1.isCrouching() and not player1.isBblocking() ) ):
if player1.isCpunching() or player1.isCkicking():
player2.takeDownHit("Ehit")
if player2.isCpunching() or player2.isCkicking():
player1.takeDownHit("Ehit")
print("socofraco**")
engine.Sound("Hit0").play()
if hitCounter == 0: engine.Sound().roundHit()
hitCounter = (hitCounter+1) % 5
                # in case there was a crouching block:
if ( (player1.isCpunching() or player1.isDpunching() or player1.isAkicking() or player1.isCkicking() ) and player2.isBblocking() ) or ( (player2.isCpunching() or player2.isDpunching() or player2.isAkicking() or player2.isCkicking() ) and player1.isBblocking() ):
if player1.isBblocking():
player1.takeDownHit("Bblocking")
if player2.isBblocking():
player2.takeDownHit("Bblocking")
engine.Sound("block").play()
player1.setX(x1-12)
player2.setX(x2+12)
print("bblock")
                # in case there was a special move
if ( player1.isSpecialMove() and (player2.isWalking() or player2.isDancing()) ) or ( player2.isSpecialMove() and (player1.isWalking() or player1.isDancing()) ):
if player1.isSpecialMove() and collide(player1.getProjectile().getProjectileSprite(), player2.currentSprite()): # and collide(projetil,player2)
player2.takeHit("special")
if player2.isSpecialMove(): # and collide(projetil,player1)
player1.takeHit("special")
print("special")
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
if keyPressed("backspace"):
pygame.quit()
if keyPressed("esc"):
self.goBack(player1,player2)
def addFigther(self,scenario):
player1 = _fighter.Fighter(0,scenario) # 0: subzero
player2 = _fighter.Fighter(1,scenario) # 1: scorpion
return player1,player2
def goBack(self,player1,player2):
player1.killPlayer()
player2.killPlayer()
del(player1)
del(player2)
sound = engine.Sound("back")
sound.play()
pygame.mixer.music.stop()
music = engine.Music("intro")
music.play()
music.volume(0.5)
menu.ScenarioMenu()
def collide(sprite1,sprite2):
return pygame.sprite.collide_mask(sprite1,sprite2)
| 55.076923 | 470 | 0.528195 | [
"MIT"
] | Lewiscowles1986/pyKombat | .history/src/fightScene_20190422211023.py | 11,465 | Python |
from fastapi import HTTPException, status
from sqlalchemy.orm import Session
from blog import hashing, models, schemas
def create(request: schemas.User, db: Session):
new_user = models.User(name=request.name,
email=request.email,
password=hashing.Hash.bcrypt(request.password))
db.add(new_user)
db.commit()
db.refresh(new_user)
return new_user
def get_one(id: int, db: Session):
user = db.query(models.User).filter(models.User.id == id).first()
if not user:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f'User with the id {id} is not found.')
return user
def bulk_load(data, db: Session):
for i in data:
new_post = models.User(name=i[0],
email=i[1],
password=hashing.Hash.bcrypt(i[2]))
db.add(new_post)
db.commit()
db.refresh(new_post)
return len(data)
| 28.742857 | 74 | 0.592445 | [
"MIT"
] | cristian-rincon/blog-api | app/blog/repository/user.py | 1,006 | Python |
# -*- coding: utf-8 -*
__author__ = 'shawn'
t = (1,)  # a one-element tuple needs the trailing comma
print t   # -> (1,)
t = (1)   # without the comma the parentheses are just grouping: this is the int 1
print t   # -> 1
t = (1, 2, 3)
print t   # -> (1, 2, 3)
| 9.181818 | 22 | 0.50495 | [
"MIT"
] | BeeBubble/SnakeRace | Liao Xue Feng Py2 Edu/tuple.py | 101 | Python |
try:
from django.utils.unittest import TestCase
except ImportError:
from django.test import TestCase
try:
from django.utils import unittest
except ImportError:
import unittest
from mock import Mock
import string
from evennia.server.portal import irc
from twisted.conch.telnet import IAC, WILL, DONT, SB, SE, NAWS, DO
from twisted.test import proto_helpers
from twisted.trial.unittest import TestCase as TwistedTestCase
from .telnet import TelnetServerFactory, TelnetProtocol
from .portal import PORTAL_SESSIONS
from .suppress_ga import SUPPRESS_GA
from .naws import DEFAULT_HEIGHT, DEFAULT_WIDTH
from .ttype import TTYPE, IS
from .mccp import MCCP
from .mssp import MSSP
from .mxp import MXP
from .telnet_oob import MSDP, MSDP_VAL, MSDP_VAR
class TestIRC(TestCase):
def test_plain_ansi(self):
"""
Test that printable characters do not get mangled.
"""
irc_ansi = irc.parse_ansi_to_irc(string.printable)
ansi_irc = irc.parse_irc_to_ansi(string.printable)
self.assertEqual(irc_ansi, string.printable)
self.assertEqual(ansi_irc, string.printable)
def test_bold(self):
s_irc = "\x02thisisatest"
s_eve = r'|hthisisatest'
self.assertEqual(irc.parse_ansi_to_irc(s_eve), s_irc)
self.assertEqual(s_eve, irc.parse_irc_to_ansi(s_irc))
def test_italic(self):
s_irc = "\x02thisisatest"
s_eve = r'|hthisisatest'
self.assertEqual(irc.parse_ansi_to_irc(s_eve), s_irc)
def test_colors(self):
color_map = (("\0030", r'|w'),
("\0031", r'|X'),
("\0032", r'|B'),
("\0033", r'|G'),
("\0034", r'|r'),
("\0035", r'|R'),
("\0036", r'|M'),
("\0037", r'|Y'),
("\0038", r'|y'),
("\0039", r'|g'),
("\00310", r'|C'),
("\00311", r'|c'),
("\00312", r'|b'),
("\00313", r'|m'),
("\00314", r'|x'),
("\00315", r'|W'),
("\00399,5", r'|[r'),
("\00399,3", r'|[g'),
("\00399,7", r'|[y'),
("\00399,2", r'|[b'),
("\00399,6", r'|[m'),
("\00399,10", r'|[c'),
("\00399,15", r'|[w'),
("\00399,1", r'|[x'))
for m in color_map:
self.assertEqual(irc.parse_irc_to_ansi(m[0]), m[1])
self.assertEqual(m[0], irc.parse_ansi_to_irc(m[1]))
def test_identity(self):
"""
Test that the composition of the function and
its inverse gives the correct string.
"""
s = r'|wthis|Xis|gis|Ma|C|complex|*string'
self.assertEqual(irc.parse_irc_to_ansi(irc.parse_ansi_to_irc(s)), s)
class TestTelnet(TwistedTestCase):
def setUp(self):
super(TestTelnet, self).setUp()
factory = TelnetServerFactory()
factory.protocol = TelnetProtocol
factory.sessionhandler = PORTAL_SESSIONS
factory.sessionhandler.portal = Mock()
self.proto = factory.buildProtocol(("localhost", 0))
self.transport = proto_helpers.StringTransport()
self.addCleanup(factory.sessionhandler.disconnect_all)
def test_mudlet_ttype(self):
self.transport.client = ["localhost"]
self.transport.setTcpKeepAlive = Mock()
d = self.proto.makeConnection(self.transport)
# test suppress_ga
self.assertTrue(self.proto.protocol_flags["NOGOAHEAD"])
self.proto.dataReceived(IAC + DONT + SUPPRESS_GA)
self.assertFalse(self.proto.protocol_flags["NOGOAHEAD"])
self.assertEqual(self.proto.handshakes, 7)
# test naws
self.assertEqual(self.proto.protocol_flags['SCREENWIDTH'], {0: DEFAULT_WIDTH})
self.assertEqual(self.proto.protocol_flags['SCREENHEIGHT'], {0: DEFAULT_HEIGHT})
self.proto.dataReceived(IAC + WILL + NAWS)
self.proto.dataReceived([IAC, SB, NAWS, '', 'x', '', 'd', IAC, SE])
self.assertEqual(self.proto.protocol_flags['SCREENWIDTH'][0], 120)
self.assertEqual(self.proto.protocol_flags['SCREENHEIGHT'][0], 100)
self.assertEqual(self.proto.handshakes, 6)
# test ttype
self.assertTrue(self.proto.protocol_flags["FORCEDENDLINE"])
self.assertFalse(self.proto.protocol_flags["TTYPE"])
self.assertTrue(self.proto.protocol_flags["ANSI"])
self.proto.dataReceived(IAC + WILL + TTYPE)
self.proto.dataReceived([IAC, SB, TTYPE, IS, "MUDLET", IAC, SE])
self.assertTrue(self.proto.protocol_flags["XTERM256"])
self.assertEqual(self.proto.protocol_flags["CLIENTNAME"], "MUDLET")
self.proto.dataReceived([IAC, SB, TTYPE, IS, "XTERM", IAC, SE])
self.proto.dataReceived([IAC, SB, TTYPE, IS, "MTTS 137", IAC, SE])
self.assertEqual(self.proto.handshakes, 5)
# test mccp
self.proto.dataReceived(IAC + DONT + MCCP)
self.assertFalse(self.proto.protocol_flags['MCCP'])
self.assertEqual(self.proto.handshakes, 4)
# test mssp
self.proto.dataReceived(IAC + DONT + MSSP)
self.assertEqual(self.proto.handshakes, 3)
# test oob
self.proto.dataReceived(IAC + DO + MSDP)
self.proto.dataReceived([IAC, SB, MSDP, MSDP_VAR, "LIST", MSDP_VAL, "COMMANDS", IAC, SE])
self.assertTrue(self.proto.protocol_flags['OOB'])
self.assertEqual(self.proto.handshakes, 2)
# test mxp
self.proto.dataReceived(IAC + DONT + MXP)
self.assertFalse(self.proto.protocol_flags['MXP'])
self.assertEqual(self.proto.handshakes, 1)
# clean up to prevent Unclean reactor
self.proto.nop_keep_alive.stop()
self.proto._handshake_delay.cancel()
return d
| 39.585526 | 97 | 0.590327 | [
"BSD-3-Clause"
] | Antrare/evennia | evennia/server/portal/tests.py | 6,017 | Python |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# new features introduced by itk module
# each new feature use a name in lower case
clrLine = "\033[2000D\033[K"
def auto_not_in_place( v=True ) :
"""Force it to not run in place
"""
import itkConfig
itkConfig.NotInPlace = v
def auto_progress( progressType = 1 ):
"""Set up auto progress report
progressType:
1 or True -> auto progress be used in a terminal
2 -> simple auto progress (without special characters)
0 or False -> disable auto progress
"""
import itkConfig
if progressType == True or progressType == 1 :
itkConfig.ImportCallback = terminal_import_callback
itkConfig.ProgressCallback = terminal_progress_callback
elif progressType == 2 :
itkConfig.ImportCallback = simple_import_callback
itkConfig.ProgressCallback = simple_progress_callback
elif progressType == False or progressType == 0 :
itkConfig.ImportCallback = None
itkConfig.ProgressCallback = None
else:
raise ValueError("Invalid auto progress type: "+repr(progressType))
def terminal_progress_callback(name, p):
"""Display the progress of an object and clean the display once complete
This function can be used with itkConfig.ProgressCallback
"""
import sys
print >> sys.stderr, clrLine+"%s: %f" % (name, p),
if p == 1 :
print >> sys.stderr, clrLine,
def terminal_import_callback(name, p):
"""Display the loading of a module and clean the display once complete
This function can be used with itkConfig.ImportCallback
"""
import sys
print >> sys.stderr, clrLine+"Loading %s..." % name,
if p == 1 :
print >> sys.stderr, clrLine,
def simple_import_callback(name, p):
"""Print a message when a module is loading
This function can be used with itkConfig.ImportCallback
"""
import sys
if p == 0:
print >> sys.stderr, "Loading %s..." % name,
elif p == 1 :
print >> sys.stderr, "done"
def simple_progress_callback(name, p):
"""Print a message when an object is running
This function can be used with itkConfig.ProgressCallback
"""
import sys
if p == 0 :
print >> sys.stderr, "Running %s..." % name,
elif p == 1 :
print >> sys.stderr, "done"
def force_load():
"""force itk to load all the submodules"""
import itk
for k in dir(itk):
getattr(itk, k)
import sys
def echo(object, f=sys.stderr) :
"""Print an object is f
If the object has a method Print(), this method is used.
repr(object) is used otherwise
"""
print >> f, object
del sys
def size(imageOrFilter) :
"""Return the size of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
imageOrFilter.UpdateOutputInformation()
img = output(imageOrFilter)
return img.GetLargestPossibleRegion().GetSize()
def physical_size(imageOrFilter) :
"""Return the physical size of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
    from __builtin__ import range # required because range is overloaded in this module
spacing_ = spacing(imageOrFilter)
size_ = size(imageOrFilter)
result = []
for i in range(0, spacing_.Size()):
result.append( spacing_.GetElement(i) * size_.GetElement(i) )
return result
def spacing(imageOrFilter) :
"""Return the spacing of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
imageOrFilter.UpdateOutputInformation()
img = output(imageOrFilter)
return img.GetSpacing()
def origin(imageOrFilter) :
"""Return the origin of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
imageOrFilter.UpdateOutputInformation()
img = output(imageOrFilter)
return img.GetOrigin()
def index(imageOrFilter) :
"""Return the index of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
imageOrFilter.UpdateOutputInformation()
img = output(imageOrFilter)
return img.GetLargestPossibleRegion().GetIndex()
def region(imageOrFilter) :
"""Return the region of an image, or of the output image of a filter
    This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
imageOrFilter.UpdateOutputInformation()
img = output(imageOrFilter)
return img.GetLargestPossibleRegion()
def strel(dim, radius=1) :
"""A method to create a ball structuring element
"""
import itk
import sys
# print >> sys.stderr, "strel() is deprecated and will be removed in the next release"
return itk.FlatStructuringElement[dim].Ball(radius)
# return an image
from itkTemplate import image, output
def template(cl) :
"""Return the template of a class (or of the class of an object) and its parameters
template() returns a tuple with 2 elements:
- the first one is the itkTemplate object
- the second is a tuple containing the template parameters
"""
from itkTemplate import itkTemplate
return itkTemplate.__class_to_template__[class_(cl)]
def ctype(s) :
"""Return the c type corresponding to the string passed in parameter
The string can contain some extra spaces.
see also itkCType
"""
from itkTypes import itkCType
ret = itkCType.GetCType(" ".join(s.split()))
if ret == None :
raise KeyError("Unrecognized C type '%s'" % s)
return ret
def class_(obj) :
"""Return a class from an object
Often in itk, the __class__ is not what the user is expecting.
class_() should do a better job
"""
import inspect
if inspect.isclass(obj) :
# obj is already a class !
return obj
else :
return obj.__class__
def range(imageOrFilter) :
"""Return the range of values in a image of in the output image of a filter
The minimum and maximum values are returned in a tuple: (min, max)
range() take care of updating the pipeline
"""
import itk
img = output(imageOrFilter)
img.UpdateOutputInformation()
img.Update()
# don't put that calculator in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)
auto_pipeline.current = tmp_auto_pipeline
comp.Compute()
return (comp.GetMinimum(), comp.GetMaximum())
def write(imageOrFilter, fileName, compression=False):
"""Write a image or the output image of a filter to filename
The writer is instantiated with the image type of the image in
parameter (or, again, with the output image of the filter in parameter)
"""
import itk
img = output(imageOrFilter)
img.UpdateOutputInformation()
# don't put that writer in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
writer = itk.ImageFileWriter[img].New(Input=img, FileName=fileName, UseCompression=compression)
auto_pipeline.current = tmp_auto_pipeline
writer.Update()
def index_to_physical_point( imageOrFilter, idx ):
"""Get the pysical point in an image from an index
imageOrFilter is the image where the physical point must be computed
idx is the index used to compute the physical point. It can be a continuous index.
"""
import sys
print >> sys.stderr, "WrapITK warning: itk.index_to_physical_point() is deprecated. The coresponding templated method is now available in itk::ImageBase."
from __builtin__ import range # required because range is overladed in this module
# get the image if needed
img = output( imageOrFilter )
dim = img.GetImageDimension()
o = origin( img )
s = spacing( img )
# use the typemaps to really get a continuous index
import itk
idx = itk.ContinuousIndex[ itk.D, dim ]( idx )
# create the output object
p = itk.Point[ itk.D, dim ]()
for i in range( 0, dim ):
p[i] = s[i] * idx[i] + o[i]
return p
def physical_point_to_continuous_index( imageOrFilter, p ):
"""Get the continuous index in an image from the physical point
    imageOrFilter is the image where the continuous index must be computed
p is the point used to compute the index
"""
import sys
print >> sys.stderr, "WrapITK warning: itk.index_to_physical_point() is deprecated. The coresponding templated method is now available in itk::ImageBase."
from __builtin__ import range # required because range is overladed in this module
# get the image if needed
img = output( imageOrFilter )
dim = img.GetImageDimension()
o = origin( img )
s = spacing( img )
# use the typemaps to really get a point
import itk
p = itk.Point[ itk.D, dim ]( p )
# create the output object
idx = itk.ContinuousIndex[ itk.D, dim ]()
for i in range( 0, dim ):
idx.SetElement( i, ( p[i] - o[i] ) / s[i] )
return idx
def physical_point_to_index( imageOrFilter, p ):
"""Get the index in an image from the physical point
    imageOrFilter is the image where the index must be computed
p is the point used to compute the index
"""
import sys
print >> sys.stderr, "WrapITK warning: itk.physical_point_to_index() is deprecated. The coresponding templated method is now available in itk::ImageBase."
from __builtin__ import range # required because range is overladed in this module
# get the image if needed
img = output( imageOrFilter )
dim = img.GetImageDimension()
o = origin( img )
s = spacing( img )
# use the typemaps to really get a point
import itk
p = itk.Point[ itk.D, dim ]( p )
# create the output object
idx = itk.Index[ dim ]()
for i in range( 0, dim ):
idx.SetElement( i, int( round( ( p[i] - o[i] ) / s[i] ) ) )
return idx
def search( s, case_sensitive=False): #, fuzzy=True):
"""Search for a class name in the itk module.
"""
s = s.replace(" ", "")
if not case_sensitive:
s = s.lower()
import itk
names = dir(itk)
names.sort()
# exact match first
if case_sensitive:
res = [n for n in names if s == n]
else:
res = [n for n in names if s == n.lower()]
# then exact match inside the name
if case_sensitive:
res += [n for n in names if s in n and s != n]
else:
res += [n for n in names if s in n.lower() and s != n.lower()]
# if fuzzy:
# try:
# # everything now requires editdist
# import editdist
# if case_sensitive:
# res.sort(key=lambda x: editdist.distance(x, s))
# else:
# res.sort(key=lambda x: (editdist.distance(x.lower(), s), x))
# except:
# pass
return res
def set_inputs( newItkObject, args=[], kargs={} ):
"""Set the inputs of the given objects, according to the non named or the named parameters in args and kargs
This function tries to assign all the non named parameters in the input of the newItkObject
- the first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)
This is the function use in the enhanced New() method to manage the inputs.
It can be used to produce a similar behavior:
def SetInputs(self, *args, **kargs):
import itk
itk.set_inputs(self, *args, **kargs)
"""
# try to get the images from the filters in args
args = [output(arg) for arg in args]
# args without name are filter used to set input image
#
    # count SetInput calls to call SetInput1, SetInput2, SetInput3, ...
    # useful with filters which take 2 inputs (or more) like SubtractImageFilter
    # Ex: subtract image2.png from image1.png and save the result in result.png
# r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
# r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
# s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
# itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
try :
for setInputNb, arg in enumerate(args) :
methodName = 'SetInput%i' % (setInputNb+1)
if methodName in dir(newItkObject) :
# first try to use methods called SetInput1, SetInput2, ...
# those method should have more chances to work in case of multiple
# input types
getattr(newItkObject, methodName)(arg)
else :
# no method called SetInput?
# try with the standard SetInput(nb, input)
newItkObject.SetInput(setInputNb, arg)
except TypeError, e :
        # the exception has (at least) two possible reasons:
        # + the filter doesn't take the input number as first argument
# + arg is an object of wrong type
#
# if it's not the first input, re-raise the exception
if setInputNb != 0 :
raise e
# it's the first input, try to use the SetInput() method without input number
newItkObject.SetInput(args[0])
# but raise an exception if there is more than 1 argument
if len(args) > 1 :
raise TypeError('Object accept only 1 input.')
except AttributeError :
# There is no SetInput() method, try SetImage
# but before, check the number of inputs
if len(args) > 1 :
raise TypeError('Object accept only 1 input.')
methodList = ['SetImage', 'SetInputImage']
methodName = None
for m in methodList:
if m in dir(newItkObject):
methodName = m
if methodName :
getattr(newItkObject, methodName)(args[0])
else:
raise AttributeError('No method found to set the input.')
# named args : name is the function name, value is argument(s)
for attribName, value in kargs.iteritems() :
        # use Set as prefix. It allows using a shorter and more intuitive
# call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than with the
# full name (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
if attribName not in ["auto_progress", "template_parameters"] :
attrib = getattr(newItkObject, 'Set' + attribName)
attrib(value)
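# Illustrative sketch, not part of the original file: calling set_inputs()
# directly on a filter instance. The wrapped types used here (IUC2) are an
# assumption and may need to be adapted to the available wrappings.
def _example_set_inputs(reader):
    import itk
    median = itk.MedianImageFilter.IUC2IUC2.New()
    # wire the reader (or rather its output image) as the first input of the filter
    set_inputs(median, [reader])
    return median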
def show(input, **kargs) :
"""display an image
"""
import itk
img = output(input)
if img.GetImageDimension() == 3 and "show3D" in dir(itk):
return itk.show3D(input, **kargs)
else :
# print "2D not supported yet, use the 3D viewer."
return show2D(input, **kargs)
class show2D :
"""Display a 2D image
"""
def __init__(self, imageOrFilter, Label=False, Title=None) :
import tempfile, itk, os
# get some data from the environment
command = os.environ.get("WRAPITK_SHOW2D_COMMAND", "imagej %(image)s -run 'View 100%%' -eval 'rename(\"%(title)s\")' &")
label_command = os.environ.get("WRAPITK_SHOW2D_LABEL_COMMAND", "imagej %(image)s -run 'View 100%%' -eval 'rename(\"%(title)s\")' -run '3-3-2 RGB' &")
compress = os.environ.get("WRAPITK_SHOW2D_COMPRESS", "true").lower() in ["on", "true", "yes", "1"]
extension = os.environ.get("WRAPITK_SHOW2D_EXTENSION", ".tif")
        # use the tempfile module to get an unused file name and to put
        # the file at the right place
self.__tmpFile__ = tempfile.NamedTemporaryFile(suffix=extension)
# get an updated image
img = output(imageOrFilter)
img.UpdateOutputInformation()
img.Update()
if Title == None:
# try to generate a title
s = img.GetSource()
if s:
s = itk.down_cast(s)
if hasattr(img, "GetSourceOutputIndex"):
o = '[%s]' % img.GetSourceOutputIndex()
elif hasattr(img, "GetSourceOutputName"):
o = '[%s]' % img.GetSourceOutputName()
else:
o = ""
Title = "%s%s" % (s.__class__.__name__, o)
else:
Title = img.__class__.__name__
try:
import IPython.ipapi
ip = IPython.ipapi.get()
if ip != None:
names = []
ref = imageOrFilter
if s:
ref = s
for n, v in ip.user_ns.iteritems():
if isinstance(v, itk.LightObject) and v == ref:
names.append(n)
if names != []:
Title = ", ".join(names)+" - "+Title
except ImportError:
# just do nothing
pass
# change the LabelMaps to an Image, so we can look at them easily
if 'LabelMap' in dir(itk) and img.GetNameOfClass() == 'LabelMap':
            # retrieve the biggest label in the label map
maxLabel = img.GetNthLabelObject( img.GetNumberOfLabelObjects() - 1 ).GetLabel()
# search for a filter to convert the label map
label_image_type = sorted( [params[1] for params in itk.LabelMapToLabelImageFilter.keys() if params[0] == class_(img) and itk.NumericTraits[itk.template(params[1])[1][0]].max() >= maxLabel ] )[0]
convert = itk.LabelMapToLabelImageFilter[ img, label_image_type ].New( img )
convert.Update()
img = convert.GetOutput()
# this is a label image - force the parameter
Label = True
write(img, self.__tmpFile__.name, compress)
# now run imview
import os
if Label:
os.system( label_command % {"image":self.__tmpFile__.name, "title": Title} )
else:
os.system( command % {"image":self.__tmpFile__.name, "title": Title} )
#tmpFile.close()
class templated_class:
"""This class is used to mimic the behavior of the templated C++ classes.
It is used that way:
class CustomClass:
# class definition here
CustomClass = templated_class(CustomClass)
customObject = CustomClass[template, parameters].New()
The template parameters are passed to the custom class constructor as a named parameter
'template_parameters' in a tuple.
The custom class may implement a static method check_template_parameters(parameters)
which should raise an exception if the template parameters provided are not suitable
to instantiate the custom class.
"""
def __init__(self, cls):
"""cls is the custom class
"""
self.__cls__ = cls
self.__templates__ = {}
def New(self, *args, **kargs):
"""Use the parameters to infer the types of the template parameters.
"""
# extract the types from the arguments to instantiate the class
import itk
types = tuple(itk.class_(o) for o in args)
return self[types].New(*args, **kargs)
def __getitem__(self, template_parameters):
"""Return a pair class-template parameters ready to be instantiated.
        The template parameters may be validated if the custom class provides the static
method check_template_parameters(parameters).
"""
if not isinstance(template_parameters, tuple):
template_parameters = (template_parameters,)
return templated_class.__templated_class_and_parameters__(self, template_parameters)
def check_template_parameters(self, template_parameters):
"""Check the template parameters passed in parameter.
"""
        # this method is there mainly to make it possible to reuse it in the custom class
# constructor after having used templated_class(). Without that, the following
# example doesn't work:
#
# class CustomClass:
# def __init__(self, *args, **kargs):
# template_parameters = kargs["template_parameters"]
# CustomClass.check_template_parameters(template_parameters)
# # other init stuff
# def check_template_parameters(template_parameters):
# # check, really
# pass
# CustomClass = templated_class(CustomClass)
#
self.__cls__.check_template_parameters(template_parameters)
def add_template(self, name, params):
if not isinstance(params, list) and not isinstance(params, tuple):
params = (params,)
params = tuple(params)
val = self[params]
self.__templates__[params] = val
setattr(self, name, val)
def add_image_templates(self, *args):
import itk
if args == []:
return
combinations = [[t] for t in args[0]]
for types in args[1:]:
temp = []
for t in types:
for c in combinations:
temp.append(c+[t])
combinations = temp
for d in itk.DIMS:
for c in combinations:
parameters = []
name = ""
for t in c:
parameters.append( itk.Image[t, d] )
name += "I"+t.short_name+str(d)
self.add_template(name, tuple(parameters))
class __templated_class_and_parameters__:
"""Inner class used to store the pair class-template parameters ready to instantiate.
"""
def __init__(self, templated_class, template_parameters):
self.__templated_class__ = templated_class
self.__template_parameters__ = template_parameters
if "check_template_parameters" in dir(templated_class.__cls__):
templated_class.__cls__.check_template_parameters(template_parameters)
def New(self, *args, **kargs):
"""A New() method to mimic the ITK default behavior, even if the class doesn't provide any New() method.
"""
kargs["template_parameters"] = self.__template_parameters__
if "New" in dir(self.__templated_class__.__cls__):
obj = self.__templated_class__.__cls__.New(*args, **kargs)
else:
obj = self.__templated_class__.__cls__(*args, **kargs)
setattr(obj, "__template_parameters__", self.__template_parameters__)
setattr(obj, "__templated_class__", self.__templated_class__)
return obj
def __call__(self, *args, **kargs):
return self.New(*args, **kargs)
def keys(self):
return self.__templates__.keys()
# everything after this comment is for dict interface
# and is a copy/paste from DictMixin
# only methods to edit dictionary are not there
def __iter__(self):
for k in self.keys():
yield k
def has_key(self,key):
try:
value=self[key]
except KeyError:
return False
return True
def __contains__(self,key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k,self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _,v in self.iteritems():
yield v
def values(self):
return [v for _,v in self.iteritems()]
def items(self):
return list(self.iteritems())
def get(self,key,default=None):
try:
return self[key]
except KeyError:
return default
def __len__(self):
return len(self.keys())
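# Illustrative sketch, not part of the original file: the usage pattern
# described in the templated_class docstring, with a tiny made-up class.
class _ExampleCustom:
    def __init__(self, *args, **kargs):
        # the template parameters are forwarded as a named argument by New()
        self.template_parameters = kargs["template_parameters"]
_ExampleCustom = templated_class(_ExampleCustom)
# _ExampleCustom[int, float].New() now returns an instance whose
# template_parameters attribute is (int, float)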
class pipeline:
"""A convenient class to store the reference to the filters of a pipeline
With this class, a method can create a pipeline of several filters and return
it without losing the references to the filters in this pipeline. The pipeline
object act almost like a filter (it has a GetOutput() method) and thus can
be simply integrated in another pipeline.
"""
def __init__( self, *args, **kargs ):
self.clear()
self.input = None
set_inputs( self, args, kargs )
def connect( self, filter ):
"""Connect a new filter to the pipeline
        The current output of the pipeline will be used as the input of this
        new filter, and the filter passed as parameter will be added to the list
"""
if self.GetOutput() != None:
set_inputs(filter, [self.GetOutput()] )
self.append( filter )
def append( self, filter ):
"""Add a new filter to the pipeline
The new filter will not be connected. The user must connect it.
"""
self.filters.append( filter )
def clear( self ):
"""Clear the filter list
"""
self.filters = []
def GetOutput( self, index=0 ):
"""Return the output of the pipeline
If another output is needed, use
pipeline.filters[-1].GetAnotherOutput() instead of this method, subclass
pipeline to implement another GetOutput() method, or use expose()
"""
if len(self.filters) == 0:
return self.GetInput()
else :
filter = self.filters[-1]
if hasattr(filter, "__getitem__"):
return filter[index]
try:
return filter.GetOutput(index)
except:
if index == 0:
return filter.GetOutput()
else:
raise ValueError("Index can only be 0 on that object")
def SetInput( self, input ):
"""Set the input of the pipeline
"""
if len(self.filters) != 0:
set_inputs(self.filters[0], [input])
self.input = input
def GetInput( self ):
"""Get the input of the pipeline
"""
return self.input
def Update( self ):
"""Update the pipeline
"""
if len(self.filters) > 0:
return self.filters[-1].Update()
def UpdateLargestPossibleRegion( self ):
"""Update the pipeline
"""
if len(self.filters) > 0:
return self.filters[-1].UpdateLargestPossibleRegion()
def UpdateOutputInformation( self ):
if "UpdateOutputInformation" in dir(self.filters[-1]):
self.filters[-1].UpdateOutputInformation()
else:
self.Update()
def __len__(self):
if len(self.filters) == 0:
return 1
else:
return self.filters[-1].GetNumberOfOutputs()
def __getitem__(self, item):
return self.GetOutput( item )
def __call__(self, *args, **kargs):
set_inputs( self, args, kargs )
self.UpdateLargestPossibleRegion()
return self
def expose(self, name, new_name=None, position=-1):
"""Expose an attribute from a filter of the minipeline.
Once called, the pipeline instance has a new Set/Get set of methods to access
        directly the corresponding method of one of the filters of the pipeline.
Ex: p.expose( "Radius" )
p.SetRadius( 5 )
            p.GetRadius()
        By default, the attribute usable on the pipeline instance has the same name as
the one of the filter, but it can be changed by providing a value to new_name.
The last filter of the pipeline is used by default, but another one may be used
by giving its position.
Ex: p.expose("Radius", "SmoothingNeighborhood", 2)
p.GetSmoothingNeighborhood()
"""
if new_name == None:
new_name = name
src = self.filters[position]
ok = False
set_name = "Set" + name
if set_name in dir(src):
setattr(self, "Set" + new_name, getattr(src, set_name))
ok = True
get_name = "Get" + name
if get_name in dir(src):
setattr(self, "Get" + new_name, getattr(src, get_name))
ok = True
if not ok:
raise RuntimeError("No attribute %s at position %s." % (name, position))
class auto_pipeline(pipeline):
current = None
def __init__(self, *args, **kargs):
pipeline.__init__(self, *args, **kargs)
self.Start()
def Start(self):
auto_pipeline.current = self
def Stop(self):
auto_pipeline.current = None
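# Illustrative sketch, not part of the original file: assembling a small
# mini-pipeline with the classes above. The wrapped types used here (IUC2)
# are an assumption and may need to be adapted to the available wrappings.
def _example_median_pipeline(file_name):
    import itk
    p = pipeline()
    p.connect(itk.ImageFileReader.IUC2.New(FileName=file_name))
    p.connect(itk.MedianImageFilter.IUC2IUC2.New())
    # re-expose the radius of the median filter on the pipeline object
    p.expose("Radius")
    return p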
def down_cast(obj):
"""Down cast an itkLightObject (or a object of a subclass) to its most specialized type.
"""
import itk, itkTemplate
className = obj.GetNameOfClass()
t = getattr(itk, className)
if isinstance(t, itkTemplate.itkTemplate):
for c in t.values():
try:
return c.cast(obj)
except:
# fail silently for now
pass
raise RuntimeError("Can't downcast to a specialization of %s" % className)
else:
return t.cast(obj)
def attribute_list( i, name ):
"""Returns a list of the specified attributes for the objects in the image.
i: the input LabelImage
name: the attribute name
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(i, Attribute=name, ReverseOrdering=True, InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l = []
for i in range(1, r.GetNumberOfLabelObjects()+1):
l.append( r.GetLabelObject(i).__getattribute__("Get"+name)() )
return l
def attributes_list( i, names ):
"""Returns a list of the specified attributes for the objects in the image.
i: the input LabelImage
    names: the attribute names
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(i, Attribute=names[0], ReverseOrdering=True, InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l = []
for i in range(1, r.GetNumberOfLabelObjects()+1):
attrs = []
for name in names :
attrs.append( r.GetLabelObject(i).__getattribute__("Get"+name)() )
l.append( tuple( attrs ) )
return l
def attribute_dict( i, name ):
"""Returns a dict with the attribute values in keys and a list of the corresponding objects in value
i: the input LabelImage
name: the name of the attribute
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(i, Attribute=name, ReverseOrdering=True, InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
d = {}
for i in range(1, r.GetNumberOfLabelObjects()+1):
lo = r.GetLabelObject(i)
v = lo.__getattribute__("Get"+name)()
l = d.get( v, [] )
l.append( lo )
d[v] = l
return d
def number_of_objects( i ):
"""Returns the number of objets in the image.
i: the input LabelImage
"""
import itk
i.UpdateLargestPossibleRegion()
i = itk.output(i)
return i.GetNumberOfLabelObjects()
def ipython_kw_matches(text):
"""Match named ITK object's named parameters"""
import IPython.ipapi, itk, re, inspect, itkTemplate
regexp = re.compile(r'''
'.*?' | # single quoted strings or
".*?" | # double quoted strings or
\w+ | # identifier
\S # other characters
''', re.VERBOSE | re.DOTALL)
ip = IPython.ipapi.get()
if "." in text: # a parameter cannot be dotted
return []
# 1. find the nearest identifier that comes before an unclosed
# parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
    # Use get_endidx() to find the identifier at the cursor position
tokens = regexp.findall(ip.IP.Completer.get_line_buffer()[:ip.IP.Completer.get_endidx()])
tokens.reverse()
iterTokens = iter(tokens); openPar = 0
for token in iterTokens:
if token == ')':
openPar -= 1
elif token == '(':
openPar += 1
if openPar > 0:
# found the last unclosed parenthesis
break
else:
return []
# 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
ids = []
isId = re.compile(r'\w+$').match
while True:
try:
ids.append(iterTokens.next())
if not isId(ids[-1]):
ids.pop(); break
if not iterTokens.next() == '.':
break
except StopIteration:
break
# lookup the candidate callable matches either using global_matches
# or attr_matches for dotted names
if len(ids) == 1:
callableMatches = ip.IP.Completer.global_matches(ids[0])
else:
callableMatches = ip.IP.Completer.attr_matches('.'.join(ids[::-1]))
argMatches = []
for callableMatch in callableMatches:
        # drop the .New at the end, so we can search in the class members
if callableMatch.endswith(".New"):
callableMatch = callableMatch[:-4]
try:
object = eval(callableMatch, ip.IP.Completer.namespace)
if isinstance(object, itkTemplate.itkTemplate):
# this is a template - lets grab the first entry to search for the methods
object = object.values()[0]
namedArgs = []
if isinstance(object, itk.LightObject) or (inspect.isclass(object) and issubclass(object, itk.LightObject)):
namedArgs = [n[3:] for n in dir(object) if n.startswith("Set")]
except Exception, e:
print e
continue
for namedArg in namedArgs:
if namedArg.startswith(text):
argMatches.append("%s=" %namedArg)
return argMatches
# install progress callback and custom completer if we are in ipython interpreter
try:
import itkConfig, IPython.ipapi
if IPython.ipapi.get():
IPython.ipapi.get().IP.Completer.matchers.insert(0, ipython_kw_matches)
itkConfig.ProgressCallback = terminal_progress_callback
# some cleanup
del itkConfig, IPython
except ImportError:
# fail silently
pass
# now loads the other modules we may found in the same directory
import os.path, sys
directory = os.path.dirname(__file__)
moduleNames = [name[:-len('.py')] for name in os.listdir(directory) if name.endswith('.py') and name != '__init__.py']
for name in moduleNames:
    # there should be another way - I don't like exec too much - but which one ??
exec "from %s import *" % name
# some cleaning
del directory, os, sys, moduleNames, name
| 32.539823 | 201 | 0.67404 | [
"Apache-2.0"
] | CapeDrew/DCMTK-ITK | Wrapping/WrapITK/Languages/Python/itkExtras/__init__.py | 33,093 | Python |
from mwb_help import deck_is_available, create_deck,\
model_is_available, add_model, add_note
from scraper_lxml import get_note_default, get_note_simple
class AnkiDutchDeck():
def __init__(self, deck_name=None):
if deck_name is None:
deck_name = 'tidbits'
if not deck_is_available(deck_name):
create_deck(deck_name)
self.deck_name = deck_name
self.default_model_name = 'dutch_default'
self.simple_model_name = 'dutch_simple'
if not model_is_available(self.default_model_name):
self.add_model_default()
if not model_is_available(self.simple_model_name):
self.add_model_simple()
def add_model_default(self):
model_name = 'dutch_default'
note_fields = ['Dutch', 'Misc', 'Explanations', 'Examples']
card_templates = [{'Front': '{{Dutch}}',
'Back' : '{{Misc}}<hr><hr>{{Explanations}}'},
{'Front': '{{Dutch}}<hr>{{Misc}}',
'Back' : '{{Explanations}}<hr><hr>{{Examples}}'},
{'Front': '{{Explanations}}',
'Back' : '{{Dutch}}<hr>{{Misc}}<hr><hr>{{Examples}}'},
{'Front': '{{Examples}}',
'Back' : '{{Dutch}}<hr>{{Misc}}<hr><hr>{{Explanations}}'}]
add_model(model_name, note_fields, card_templates)
def add_model_simple(self):
model_name = 'dutch_simple'
note_fields = ['Dutch', 'Explanations']
card_templates = [{'Front': '{{Dutch}}',
'Back' : '{{Explanations}}'},
{'Front': '{{Explanations}}',
'Back' : '{{Dutch}}'}]
add_model(model_name, note_fields, card_templates)
def add_note_default(self, note_fields):
add_note(note_fields, self.deck_name, self.default_model_name)
def add_note_simple(self, note_fields):
add_note(note_fields, self.deck_name, self.simple_model_name)
def add_note_from_word(self, word, output_file=None):
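        # Try the rich default note (Dutch/Misc/Explanations/Examples) first; if the
        # scraper cannot build one, fall back to the simple two-field note. Words not
        # found at all are optionally appended to output_file.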
is_default_model = True
try:
note_fields = get_note_default(word)
if note_fields is None:
print(f'"{word}" not found in mijnwoordenbook')
if output_file:
with open(output_file, 'a') as f:
f.write('\n')
f.write(word)
return
except:
is_default_model = False
note_fields = get_note_simple(word)
try:
if is_default_model:
self.add_note_default(note_fields)
else:
self.add_note_simple(note_fields)
except:
print(f'"{word}" already exists in deck {self.deck_name}')
def add_note_from_list(self, word_list, output_file=None):
[self.add_note_from_word(w, output_file) for w in word_list]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='file containing word list')
parser.add_argument('-f', '--file',
help='select file containing word list')
parser.add_argument('-l', '--list',
help='input word list, separated by comma')
parser.add_argument('-o', '--output',
help='output unfound words to file')
args = parser.parse_args()
word_list = []
if args.file is not None:
words = open(args.file).read().split('\n')
word_list.extend(words)
if args.list is not None:
words = args.list.strip().split(',')
word_list.extend(words)
# word_list = ['hhhsss', 'duits', 'alsjeblieft', 'waterpokken']
if "" in word_list:
word_list.remove("")
ADD = AnkiDutchDeck()
ADD.add_note_from_list(word_list, args.output)
| 37.737864 | 85 | 0.563931 | [
"MIT"
] | gaganpreet/learning-dutch | add_cards.py | 3,887 | Python |
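The script above can also be driven from Python instead of the command line. A usage sketch, assuming add_cards.py, mwb_help and scraper_lxml are importable and the Anki backend that mwb_help talks to is running; the deck and file names are placeholders:

from add_cards import AnkiDutchDeck

deck = AnkiDutchDeck('dutch_vocab')        # deck and note models are created on first use
deck.add_note_from_word('fiets')           # add a single word
with open('words.txt') as f:               # one word per line
    words = [line.strip() for line in f if line.strip()]
deck.add_note_from_list(words, output_file='unfound.txt')  # log words that were not found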
#!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='pyzkaccess',
description='Python interface to ZKTeco ZKAccess C3-100/200/400 controllers',
version='0.2',
author='Igor Derkach',
author_email='gosha753951@gmail.com',
url='https://github.com/bdragon300/pyzkaccess',
license='Apache 2.0',
python_requires='>=3.5',
    packages=find_packages(exclude=['tests', 'docs']),
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: Microsoft :: Windows',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Topic :: System :: Hardware'
],
# Also tox.ini
install_requires=[
'wrapt',
'pydantic'
],
)
| 29.897436 | 81 | 0.632933 | [
"Apache-2.0"
] | cybrnode/pyzkaccess | setup.py | 1,166 | Python |
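A quick way to confirm what the setup.py above recorded, assuming the package has been installed from it (for example with pip install .); importlib.metadata needs Python 3.8+, so the older interpreters in the supported range would have to use a pkg_resources fallback instead:

from importlib.metadata import metadata, version

meta = metadata('pyzkaccess')
print(meta['Name'], version('pyzkaccess'))   # pyzkaccess 0.2
print(meta['Summary'])                       # Python interface to ZKTeco ZKAccess C3-100/200/400 controllers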
from genomics_demo.dna import DNA
import pytest
def test_bad_sequence_raises_error():
with pytest.raises(ValueError):
DNA('ATB')
def test_complimentary_sequence_works():
assert DNA('GTC').complimentary_sequence == DNA('CAG')
assert DNA('ATC').complimentary_sequence == DNA('TAG')
def test_gc_content():
assert DNA('ATTTATGGCC').gc_content == 0.4
assert DNA('AGGTATGGCC').gc_content == 0.6
assert DNA('ATAT').gc_content == 0
def test_triplets():
assert DNA('AAA').split_DNA_triplets == ['AAA']
assert DNA('AAATTTGGG').split_DNA_triplets == ['AAA','TTT','GGG']
assert DNA('AAAT').split_DNA_triplets == ['AAA','T']
def test_find_start():
assert DNA('ATGGG').find_first_start_site == 0
assert DNA('CCCCATG').find_first_start_site == 4
with pytest.raises(TypeError):
DNA('GGG').find_first_start_site
assert DNA('GTC').compliment == DNA('CAG')
assert DNA('ATC').compliment == DNA('TAG')
def test_find_start_codons():
"""New test to test the function to find start codons"""
assert DNA('ATGGTACATGCGA').find_start_codons() == [0, 7]
def test_transcribe():
assert DNA('GTC').transcribe() == 'GAC'
assert DNA('ATC').transcribe() == 'GAU'
#def test_gc_content_sequence_works():
# assert DNA('GC').gc_content > 0.5
def test_is_gc_rich():
    assert DNA('GTGT').gc_content == 0.5
# length = len(sequence)
# c_count = sequence.upper().count('C')
# g_count = sequence.upp
| 29.183333 | 69 | 0.670474 | [
"MIT"
] | nickdelgrosso/genomics_workshop_demo | tests/test_dna.py | 1,751 | Python |
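The tests above exercise a DNA class from genomics_demo whose implementation is not part of this file. Purely to make the expected behaviour concrete, here is a partial sketch of an object that would satisfy the property-style assertions (validation, equality, complement, GC content, triplet splitting, start-codon search); it is not the workshop's actual code and it leaves out find_first_start_site and transcribe:

class DNA:
    _pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}

    def __init__(self, sequence):
        sequence = sequence.upper()
        if set(sequence) - set('ATGC'):
            raise ValueError(f"invalid bases in sequence: {sequence}")
        self.sequence = sequence

    def __eq__(self, other):
        return isinstance(other, DNA) and self.sequence == other.sequence

    @property
    def complimentary_sequence(self):
        return DNA(''.join(self._pairs[base] for base in self.sequence))

    @property
    def gc_content(self):
        return (self.sequence.count('G') + self.sequence.count('C')) / len(self.sequence)

    @property
    def split_DNA_triplets(self):
        return [self.sequence[i:i + 3] for i in range(0, len(self.sequence), 3)]

    def find_start_codons(self):
        return [i for i in range(len(self.sequence) - 2) if self.sequence[i:i + 3] == 'ATG']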
import json
def read_cfg(model_repository):
    pass
| 11 | 31 | 0.745455 | [
"MIT"
] | drunkcoding/model-inference | service/server_cfg.py | 55 | Python |
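read_cfg in server_cfg.py above is only a stub, and nothing in the file says what the model-repository configs look like. Purely as a hedged illustration of a json-based loader over a per-model directory layout (the names read_cfg_sketch and config.json are assumptions, not the project's API):

import json
from pathlib import Path

def read_cfg_sketch(model_repository):
    """Collect {model_name: parsed config} for every config.json found (assumed layout)."""
    configs = {}
    for cfg_path in Path(model_repository).glob('*/config.json'):
        with cfg_path.open() as f:
            configs[cfg_path.parent.name] = json.load(f)
    return configs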