# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN config."""
import dataclasses
from acme.adders import reverb as adders_reverb
import numpy as np
@dataclasses.dataclass
class DQNConfig:
"""Configuration options for DQN agent."""
epsilon: float = 0.05 # Action selection via epsilon-greedy policy.
# TODO(b/191706065): update all clients and remove this field.
seed: int = 1 # Random seed.
# Learning rule
learning_rate: float = 1e-3 # Learning rate for Adam optimizer.
adam_eps: float = 1e-8 # Eps for Adam optimizer.
discount: float = 0.99 # Discount rate applied to value per timestep.
n_step: int = 5 # N-step TD learning.
target_update_period: int = 100 # Update target network every period.
max_gradient_norm: float = np.inf # For gradient clipping.
# Replay options
batch_size: int = 256 # Number of transitions per batch.
min_replay_size: int = 1_000 # Minimum replay size.
max_replay_size: int = 1_000_000 # Maximum replay size.
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
importance_sampling_exponent: float = 0.2 # Importance sampling for replay.
priority_exponent: float = 0.6 # Priority exponent for replay.
prefetch_size: int = 4 # Prefetch size for reverb replay performance.
samples_per_insert: float = 0.5 # Ratio of learning samples to insert.
  # Rate to be used for the SampleToInsertRatio rate limiter tolerance.
# See a formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
# How many gradient updates to perform per learner step.
num_sgd_steps_per_step: int = 1
@dataclasses.dataclass
class DQNEmpowermentConfig:
"""Configuration options for DQN agent."""
epsilon: float = 0.05 # Action selection via epsilon-greedy policy.
# TODO(b/191706065): update all clients and remove this field.
seed: int = 1 # Random seed.
# Learning rule
learning_rate: float = 1e-3 # Learning rate for Adam optimizer.
adam_eps: float = 1e-8 # Eps for Adam optimizer.
discount: float = 0.99 # Discount rate applied to value per timestep.
n_step: int = 5 # N-step TD learning.
target_update_period: int = 100 # Update target network every period.
max_gradient_norm: float = np.inf # For gradient clipping.
# Replay options
batch_size: int = 256 # Number of transitions per batch.
min_replay_size: int = 1_000 # Minimum replay size.
max_replay_size: int = 1_000_000 # Maximum replay size.
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
importance_sampling_exponent: float = 0.2 # Importance sampling for replay.
priority_exponent: float = 0.6 # Priority exponent for replay.
prefetch_size: int = 4 # Prefetch size for reverb replay performance.
samples_per_insert: float = 0.5 # Ratio of learning samples to insert.
sequence_length: int = 10
sequence_period: int = 2
  # Rate to be used for the SampleToInsertRatio rate limiter tolerance.
# See a formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
# How many gradient updates to perform per learner step.
num_sgd_steps_per_step: int = 1
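

# --- Usage sketch (illustrative; not part of the original config module) ---
# Constructing a config and overriding a few hyperparameters; the values below
# are arbitrary. An agent builder would then read fields such as
# `config.learning_rate` and `config.adam_eps` when creating its optimizer.
if __name__ == '__main__':
  example_config = DQNConfig(learning_rate=5e-4, batch_size=128, n_step=3)
  assert example_config.discount == 0.99  # Untouched fields keep their defaults.
  print(example_config)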
import torch
from torch import nn
from torch.distributions import MultivariateNormal
class Normal(nn.Module):
def __init__(self, num_vars=100):
super(Normal, self).__init__()
self.num_vars = num_vars
self.means = nn.Parameter(torch.zeros(num_vars))
        # Despite its name, this parameter is used as a full covariance matrix
        # (initialized to the identity), not a vector of standard deviations.
        self.std = nn.Parameter(torch.eye(num_vars))
def log_prob(self, x):
distr = MultivariateNormal(self.means, self.std)
return distr.log_prob(x)
    def sample(self, num_samples):
        distr = MultivariateNormal(self.means, self.std)
        # `Distribution.sample_n` is deprecated; pass a sample shape instead.
        return distr.sample((num_samples,))
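

# --- Usage sketch (illustrative; not part of the original module) ---
# Draw a few samples from the learnable Gaussian and score them. Samples have
# shape (num_samples, num_vars); log_prob returns one value per sample.
if __name__ == "__main__":
    model = Normal(num_vars=4)
    samples = model.sample(3)
    print(samples.shape)            # torch.Size([3, 4])
    print(model.log_prob(samples))  # tensor of shape (3,)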
import sys
class SortableArray():
def __init__(self, arr):
self.arr = arr
def partition(self, left, right):
# choose right most as pivot
pivot_index = right
# get pivot value for compares
pivot = self.arr[pivot_index]
right -= 1
print(f'left orig: {left} right orig: {right}')
while True:
# move left pointer until we hit a value >= pivot
while self.arr[left] < pivot:
print(f'left: {left}')
left += 1
print('left', left)
# move right until hit a value <= pivot or hits 0 index
while right > 0 and self.arr[right] > pivot:
print(f'right: {right}')
right -= 1
print('right', right)
# if left >= right then we break and swap
if left >= right:
break
# if not we swap right and left and continue
else:
self.arr[left], self.arr[right] = self.arr[right], self.arr[left]
left += 1
# finally swap left and pivot
self.arr[left], self.arr[pivot_index] = self.arr[pivot_index], self.arr[left]
print(self.arr)
return left
def quicksort(self, left, right):
# base case one element
if right - left <= 0:
return
# partition and get pivot
pivot_index = self.partition(left, right)
# recursively call for left partition
self.quicksort(left, pivot_index - 1)
# recursively call for right partition
self.quicksort(pivot_index + 1, right)
def quickselect(self, kth_lowest_num, left, right):
# base case one element
if right - left <= 0:
return self.arr[left]
# partition and get pivot
pivot_index = self.partition(left, right)
        # if kth is less than the pivot index, recurse into the left partition;
        # the recursive result must be returned, otherwise quickselect yields None
        if kth_lowest_num < pivot_index:
            return self.quickselect(kth_lowest_num, left, pivot_index - 1)
        # if kth is greater than the pivot index, recurse into the right partition
        elif kth_lowest_num > pivot_index:
            return self.quickselect(kth_lowest_num, pivot_index + 1, right)
        # else we have the kth number because kth == pivot_index
        else:
            print(f'kth {kth_lowest_num}: {self.arr[pivot_index]}')
            return self.arr[pivot_index]
def main(arr, kth):
sortable = SortableArray(arr)
print(sortable.quickselect(kth, 0, len(arr) - 1))
sortable.quicksort(0, len(arr) - 1)
print(f'final sorted array: {sortable.arr}')
if __name__ == '__main__':
main([int(x) for x in sys.argv[1].split(',')], int(sys.argv[2]))
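# --- Example invocation (illustrative; the script name is a placeholder) ---
#   python sortable_array.py 7,3,9,1,5 2
# prints the partition traces, the value at sorted index 2 (5 for this input),
# and finally the fully sorted array [1, 3, 5, 7, 9].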
"""
Classes to contextualize math operations in log vs linear space.
"""
from types import MethodType
import numpy as np
from ..exceptions import InvalidBase
__all__ = (
'get_ops',
'LinearOperations',
'LogOperations',
)
# For 2.x, these are ascii strings. For 3.x these are unicode strings.
acceptable_base_strings = {'linear', 'e'}
def get_ops(base):
"""
Returns an *Operations instance, depending on the base.
Parameters
----------
base : float, 'linear', 'e'
The base for the Operations instance.
"""
# Let's not initialize unless we have to.
if base in cache:
ops = cache[base]
else:
# This assumes that 'linear' is in cache.
ops = LogOperations(base)
cache[base] = ops
return ops
def exp_func(b):
"""
Returns a base-`b` exponential function.
Parameters
----------
b : positive float or 'e'
The base of the desired exponential function.
Returns
-------
exp : function
The base-`b` exponential function. The returned function will operate
elementwise on NumPy arrays, but note, it is not a ufunc.
Examples
--------
>>> exp2 = exp_func(2)
>>> exp2(1)
2.0
>>> exp3 = exp_func(3)
>>> exp3(1)
3.0
Raises
------
InvalidBase
If the base is less than zero or equal to one.
"""
from dit.utils import is_string_like
if is_string_like(b) and b not in acceptable_base_strings:
raise InvalidBase(msg=b)
if b == 'linear':
exp = lambda x: x # pragma: no branch
elif b == 2:
exp = np.exp2
elif b == 10:
exp = lambda x: 10**x
elif b == 'e' or np.isclose(b, np.e):
exp = np.exp
else:
if b <= 0 or b == 1:
raise InvalidBase(b)
def exp(x, base=b):
"""
Return `base`**`x`
Parameters
----------
x : float
The number to exponentiate
base : float
The base of the exponential
Returns
-------
p : float
`base`**`x`
"""
return base**np.asarray(x)
return exp
def log_func(b):
"""
Returns a base-`b` logarithm function.
Parameters
----------
b : positive float or 'e'
The base of the desired logarithm function.
Returns
-------
log : function
The base-`b` logarithm function. The returned function will operate
elementwise on NumPy arrays, but note, it is not a ufunc.
Examples
--------
>>> log2 = log_func(2)
>>> log2(2)
1.0
>>> log3 = log_func(3)
>>> log3(3)
1.0
Raises
------
InvalidBase
If the base is less than zero or equal to one.
"""
from dit.utils import is_string_like
if is_string_like(b) and b not in acceptable_base_strings:
raise InvalidBase(msg=b)
if b == 'linear':
log = lambda x: x # pragma: no branch
elif b == 2:
log = np.log2
elif b == 10:
log = np.log10
elif b == 'e' or np.isclose(b, np.e):
log = np.log
else:
if b <= 0 or b == 1:
raise InvalidBase(b)
Z = np.log(b)
def log(x, func=np.log):
"""
Return the log of `x`
Parameters
----------
x : float
The value to take the log of
func : function
A logarithm function
Returns
-------
log : float
The logarithm of `x` in base `b` (from outer scope)
"""
return func(x) / Z
return log
class Operations(object):
"""
Base class which implements certain math operations.
For example, regular addition with log probabilities is handled specially.
While we could implement many more operations, we do not. Their usage
is uncommon and their implementation would be slower as well. For example,
    subtraction with log probabilities must go as:
.. math::
        \log_2(x - y) = \log_2(x) + \log_2\left(1 - 2^{\log_2(y) - \log_2(x)}\right)
Note that if :math:`y > x`, then :math:`log(y) > log(x)` and the inner term
of the second logarithm will be less than 0, yielding NaN.
"""
### Do we allow base == 'e' or should we convert to its numerical value?
### Ans: We store whatever was specified but provide get_base() with an
### option to return a numerical base.
one = None
zero = None
base = None
exp = None
log = None
def get_base(self, numerical=False):
"""
Returns the base in which operations take place.
For linear-based operations, the result is 'linear'.
Parameters
----------
numerical : bool
If `True`, then if the base is 'e', it is returned as a float.
"""
if numerical and self.base == 'e':
base = np.exp(1)
else:
base = self.base
return base
def is_null(self, p):
"""
Returns `True` if `p` is a null probability.
Parameters
----------
p : float
The probability to be tested.
"""
return np.isclose(self.zero, p)
def is_null_exact(self, p):
"""
Returns `True` if `p` is exactly a null probability.
Parameters
----------
p : float
The probability to be tested.
"""
return self.zero == p
def add(self, x, y):
""" Abstract base class """
raise NotImplementedError
def add_inplace(self, x, y):
""" Abstract base class """
raise NotImplementedError
def add_reduce(self, x):
""" Abstract base class """
raise NotImplementedError
def mult(self, x, y):
""" Abstract base class """
raise NotImplementedError
def mult_inplace(self, x, y):
""" Abstract base class """
raise NotImplementedError
def mult_reduce(self, x):
""" Abstract base class """
raise NotImplementedError
def invert(self, x):
""" Abstract base class """
raise NotImplementedError
def normalize(self, x):
""" Abstract base class """
raise NotImplementedError
class LinearOperations(Operations):
"""
The class of operations on linear values.
"""
one = 1
zero = 0
base = 'linear'
# If the functions below are standard Python functions (as opposed to
# NumPy ufuncs), then they will be treated as unbound methods for the class.
# During instantiation, they are bound to the instance (since before
# instantiation they are class methods) and thus, we are left with
# bound methods (undesirably). If we had modified these attributes in the
# __init__ function, then they would not be bound (or even unbound methods)
# but functions instead (desirably). This is precisely what LogOperations
# does, which is why it does not have this issue. An alternative approach
# is to explicitly declare these functions to be static methods, as we
# do below.
#
exp = staticmethod(exp_func(base))
log = staticmethod(log_func(base))
def add(self, x, y):
"""
Add the arrays element-wise. Neither x nor y will be modified.
Assumption: :math:`y >= 0`.
Operation: :math:`z[i] = x[i] + y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
z = x + y
return z
def add_inplace(self, x, y):
"""
Adds `y` to `x`, in-place. `x` will be modified, but `y` will not.
Assumption: :math:`y >= 0`.
Operation: :math:`x[i] += y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
x += y
return x
def add_reduce(self, x, axis=None):
"""
Performs an `addition' reduction on `x`.
Assumption: :math:`y >= 0`.
Operation: :math:`z = \\sum_i x[i]`
Returns
-------
z : float
The summation of the elements in `x`.
"""
z = x.sum(axis=axis)
return z
def mult(self, x, y):
"""
Multiplies the arrays element-wise. Neither x nor y will be modified.
Operation: :math:`z[i] = x[i] * y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
z = x * y
return z
def mult_inplace(self, x, y):
"""
Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.
Operation: :math:`x[i] *= y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
x *= y
return x
def mult_reduce(self, x, axis=None):
"""
        Performs a `multiplication' reduction on `x`.
Operation: :math:`z = \\prod_i x[i]`
Returns
-------
z : float
The product of the elements in `x`.
"""
z = np.prod(x, axis=axis)
return z
def invert(self, x):
"""
Returns the element-wise multiplicative inverse of x.
Operation: :math:`z[i] = 1/x[i]`
Parameters
----------
x : NumPy array, shape (n,)
The array to invert.
Returns
-------
z : NumPy array, shape (n,)
The inverted array.
"""
z = 1 / x
return z
def normalize(self, x, axis=None):
"""
Returns a normalized version of x.
Operation: :math:`z[i] = x[i] / sum(x)`
If x is 2D and axis is None, then normalization is over all elements.
Use axis=-1 to normalize each row of x.
Parameters
----------
x : NumPy array, shape (n,)
The array to normalize.
Returns
-------
z : NumPy array, shape (n,)
The normalized array.
"""
        z = x / x.sum(axis=axis)
return z
def set_add(ops):
"""
Set the add method on the LogOperations instance.
"""
# To preserve numerical accuracy, we must make use of a logaddexp
# function. These functions only exist in Numpy for base-e and base-2.
# For all other bases, we must convert and then convert back.
# In each case, we use default arguments to make the function that we
# are calling 'local'.
base = ops.base
if base == 2:
def add(self, x, y, func=np.logaddexp2):
return func(x, y)
elif base == 'e' or np.isclose(base, np.e):
def add(self, x, y, func=np.logaddexp):
return func(x, y)
else:
# No need to optimize this...
def add(self, x, y):
# Convert log_b probabilities to log_2 probabilities.
x2 = x * np.log2(base)
y2 = y * np.log2(base)
z = np.logaddexp2(x2, y2)
# Convert log_2 probabilities to log_b probabilities.
z *= self.log(2)
return z
add.__doc__ = """
Add the arrays element-wise. Neither x nor y will be modified.
Assumption: y <= 0.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
ops.add = MethodType(add, ops)
def set_add_inplace(ops):
"""
Set the add_inplace method on the LogOperations instance.
"""
base = ops.base
if base == 2:
def add_inplace(self, x, y, func=np.logaddexp2):
return func(x, y, x)
elif base == 'e' or np.isclose(base, np.e):
def add_inplace(self, x, y, func=np.logaddexp):
return func(x, y, x)
else:
def add_inplace(self, x, y):
x *= np.log2(base)
y2 = y * np.log2(base)
np.logaddexp2(x, y2, x)
x *= self.log(2)
return x
add_inplace.__doc__ = """
Adds `y` to `x`, in-place. `x` will be modified, but `y` will not.
Assumption: :math:`y <= 0`.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
ops.add_inplace = MethodType(add_inplace, ops)
def set_add_reduce(ops):
"""
Set the add_reduce method on the LogOperations instance.
"""
# https://github.com/numpy/numpy/issues/4599
base = ops.base
if base == 2:
def add_reduce(self, x, axis=None, func=np.logaddexp2):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
z = func.reduce(x, axis=axis, dtype=float)
return z
elif base == 'e' or np.isclose(base, np.e):
def add_reduce(self, x, axis=None, func=np.logaddexp):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
z = func.reduce(x, axis=axis, dtype=float)
return z
else:
def add_reduce(self, x, axis=None):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
# Change the base-2, add, and then convert back.
x2 = x * np.log2(base)
z = np.logaddexp2.reduce(x2, axis=axis, dtype=float)
z /= np.log2(base)
return z
add_reduce.__doc__ = """
Performs an `addition' reduction on `x`.
Assumption: :math:`y <= 0`.
Returns
-------
z : float
The summation of the elements in `x`.
"""
ops.add_reduce = MethodType(add_reduce, ops)
class LogOperations(Operations):
one = None
zero = None
base = None
exp = None
log = None
def __init__(self, base):
"""
Initialize the log operation manager.
Parameters
----------
base : float
The base of the logarithm.
"""
self.set_base(base)
def set_base(self, base):
"""
Change the base of the logarithm.
Parameters
----------
base : float
The base of the logarithm.
"""
self.base = base
self.exp = exp_func(base)
self.log = log_func(base)
# Note: When base < 1, zero == +inf. When base > 1, zero == -inf.
self.one = self.log(1)
self.zero = self.log(0)
# Update the add methods.
set_add(self)
set_add_inplace(self)
set_add_reduce(self)
def mult(self, x, y):
"""
Multiplies the arrays element-wise. Neither `x` nor `y` will be modified.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
z = x + y
return z
def mult_inplace(self, x, y):
"""
Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
x += y
return x
def mult_reduce(self, x, axis=None):
"""
        Performs a `multiplication' reduction on `x`.
Returns
-------
z : float
The product of the elements in `x`.
"""
# The identity for addition in NumPy is zero.
# This corresponds to an identity of 1 for log operations, and this is
# exactly the desired identity for multiplying probabilities.
z = x.sum(axis=axis)
return z
def invert(self, x):
"""
Returns the element-wise multiplicative inverse of `x`: :math:`1/x`.
Parameters
----------
x : NumPy array, shape (n,)
The array to invert.
Returns
-------
z : NumPy array, shape (n,)
The inverted array.
"""
z = -x
return z
def normalize(self, x, axis=None):
"""
Returns a normalized version of `x`.
Non-log equivalent operation: :math:`z[i] = x[i] / sum(x)`
If `x` is 2D and axis is None, then normalization is over all elements.
Use axis=-1 to normalize each row of `x`.
Parameters
----------
x : NumPy array, shape (n,)
The array to normalize.
Returns
-------
z : NumPy array, shape (n,)
The normalized array.
"""
# The API way would be: mult(x, invert( add_reduce(x) ))
# We'll avoid some of those function calls.
z = x - self.add_reduce(x, axis=axis)
return z
cache = {
'linear': LinearOperations(),
2: LogOperations(2),
'e': LogOperations('e')
}
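

# --- Usage sketch (illustrative; not part of the original module) ---
# `get_ops` returns an object that makes probability arithmetic uniform across
# linear and log space: adding two probabilities of 0.25 yields 0.5 either way.
if __name__ == "__main__":
    lin = get_ops('linear')
    log2 = get_ops(2)
    p, q = np.array([0.25]), np.array([0.25])
    print(lin.add(p, q))                                 # [0.5]
    print(log2.exp(log2.add(log2.log(p), log2.log(q))))  # [0.5], computed in log2 space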
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import enchant
import os
import pickle
import re
class SpellChecker:
"""
A basic spell checker.
"""
# These must be all lower case for comparisons
uimsgs = {
# OK words
"adaptively", "adaptivity",
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"chamfer",
"couldn", # couldn't
"customizable",
"decrement",
"derivate",
"deterministically",
"doesn", # doesn't
"duplications",
"effector",
"equi", # equi-angular, etc.
"fader",
"globbing",
"hasn", # hasn't
"hetero",
"hoc", # ad-hoc
"incompressible",
"indices",
"instantiation",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"ons", # add-ons
"pong", # ping pong
"scalable",
"shadeless",
"shouldn", # shouldn't
"smoothen",
"spacings",
"teleport", "teleporting",
"vertices",
"wasn", # wasn't
# Merged words
"antialiasing", "antialias",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoexec",
"autoexecution",
"autogenerated",
"autolock",
"automask", "automasking",
"automerge",
"autoname",
"autopack",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitdepth",
"bitflag", "bitflags",
"bitrate",
"blackbody",
"blendfile",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"customdata",
"dataset", "datasets",
"de",
"deadzone",
"deconstruct",
"defocus",
"denoise", "denoised", "denoising", "denoiser",
"deselect", "deselecting", "deselection",
"despill", "despilling",
"dirtree",
"editcurve",
"editmesh",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline", "gridlines",
"hardlight",
"hemi",
"hostname",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightcache",
"lightprobe", "lightprobes",
"lightless",
"lineset",
"linestyle", "linestyles",
"localview",
"lookup", "lookups",
"mathutils",
"micropolygon",
"midlevel",
"midground",
"mixdown",
"monospaced",
"multi",
"multifractal",
"multiframe",
"multilayer",
"multipaint",
"multires", "multiresolution",
"multisampling",
"multiscatter",
"multitexture",
"multithreaded",
"multiuser",
"multiview",
"namespace",
"nodetree", "nodetrees",
"keyconfig",
"offscreen",
"online",
"playhead",
"popup", "popups",
"pointcloud",
"pre",
"precache", "precaching",
"precalculate",
"precomputing",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing", "preprocessor",
"preseek",
"promillage",
"pushdown",
"raytree",
"readonly",
"realtime",
"reinject", "reinjected",
"rekey",
"remesh",
"reprojection", "reproject", "reprojecting",
"resize",
"restpose",
"resync",
"retarget", "retargets", "retargeting", "retargeted",
"retiming",
"rigidbody",
"ringnoise",
"rolloff",
"runtime",
"scanline",
"screenshot", "screenshots",
"seekability",
"selfcollision",
"shadowbuffer", "shadowbuffers",
"singletexture",
"spellcheck", "spellchecking",
"startup",
"stateful",
"starfield",
"studiolight",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"subitem",
"submode",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"tradeoff",
"un",
"unassociate", "unassociated",
"unbake",
"unclosed",
"uncomment",
"unculled",
"undeformed",
"undistort", "undistorted", "undistortion",
"ungroup", "ungrouped",
"unhide",
"unindent",
"unkeyed",
"unlink", "unlinked",
"unmute",
"unphysical",
"unpremultiply",
"unprojected",
"unprotect",
"unreacted",
"unreferenced",
"unregister",
"unselect", "unselected", "unselectable",
"unsets",
"unshadowed",
"unspill",
"unstitchable", "unstitch",
"unsubdivided", "unsubdivide",
"untrusted",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
"workspace", "workspaces",
# Neologisms, slangs
"affectable",
"animatable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"effectors",
"expander",
"instancer",
"keyer",
"lacunarity",
"linkable",
"numerics",
"occluder", "occluders",
"overridable",
"passepartout",
"perspectively",
"pixelate",
"pointiness",
"polycount",
"polygonization", "polygonalization", # yuck!
"scalings",
"selectable", "selectability",
"shaper",
"smoothen", "smoothening",
"spherize", "spherized",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderers", "renderable", "renderability",
# Really bad!!!
"convertor",
"fullscr",
# Abbreviations
"aero",
"amb",
"anim",
"aov",
"app",
"bbox", "bboxes",
"bksp", # Backspace
"bool",
"calc",
"cfl",
"config", "configs",
"const",
"coord", "coords",
"degr",
"diff",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"fract",
"frs",
"grless",
"http",
"init",
"irr", # Irradiance
"kbit", "kb",
"lang", "langs",
"lclick", "rclick",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mbs", # mouse button 'select'.
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"pano",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sce",
"sel",
"spec",
"struct", "structs",
"subdiv",
"sys",
"tex",
"texcoord",
"tmr", # timer
"tri", "tris",
"udim", "udims",
"upres", # Upresolution
"usd",
"uv", "uvs", "uvw", "uw", "uvmap",
"ve",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"vram",
"xor",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"affine",
"albedo",
"anamorphic",
"anisotropic", "anisotropy",
"bitangent",
"boid", "boids",
"ceil",
"compressibility",
"curvilinear",
"equiangular",
"equisolid",
"euler", "eulers",
"fribidi",
"gettext",
"hashable",
"hotspot",
"interocular",
"intrinsics",
"irradiance",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"msgfmt",
"nand", "xnor",
"normals",
"numpad",
"octahedral",
"octree",
"omnidirectional",
"opengl",
"openmp",
"parametrization",
"photoreceptor",
"poly",
"polyline", "polylines",
"probabilistically",
"pulldown", "pulldowns",
"quantized",
"quartic",
"quaternion", "quaternions",
"quintic",
"samplerate",
"sawtooth",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"subtractive",
"superellipse",
"tooltip", "tooltips",
"trackpad",
"tuple",
"unicode",
"viewport", "viewports",
"viscoelastic",
"vorticity",
"waveform", "waveforms",
"wildcard", "wildcards",
"wintab", # Some Windows tablet API
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"bindpose",
"binormal",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chroma",
"chrominance",
"clearcoat",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"cubemap", "cubemaps",
"cuda",
"deinterlace",
"dropoff",
"duotone",
"dv",
"eigenvectors",
"emissive",
"equirectangular",
"filmlike",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"kerning",
"lightmap",
"linearlight",
"lossless", "lossy",
"luminance",
"mantaflow",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"pinlight",
"qi",
"radiosity",
"raycasting",
"raytrace", "raytracing", "raytraced",
"refractions",
"remesher", "remeshing", "remesh",
"renderfarm",
"scanfill",
"shader", "shaders",
"shadowmap", "shadowmaps",
"softlight",
"specular", "specularity",
"spillmap",
"sobel",
"stereoscopy",
"texel",
"timecode",
"tonemap",
"toon",
"transmissive",
"vividlight",
"volumetrics",
"voronoi",
"voxel", "voxels",
"vsync",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"audaspace",
"azone", # action zone
"backwire",
"bbone",
"bendy", # bones
"bmesh",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"depsgraph",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"dyntopo",
"editbone",
"editmode",
"eevee",
"fcurve", "fcurves",
"fedge", "fedges",
"filmic",
"fluidsim",
"freestyle",
"enum", "enums",
"gizmogroup",
"gon", "gons", # N-Gon(s)
"gpencil",
"idcol",
"keyframe", "keyframes", "keyframing", "keyframed",
"lookdev",
"luminocity",
"mathvis",
"metaball", "metaballs", "mball",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"overscan",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"qe", # keys...
"shaderfx", "shaderfxs",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"subdiv",
"subtype",
"sunsky",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"userpref",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"vse",
"wasd", "wasdqe", # keys...
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# UOC (Ugly Operator Categories)
"cachefile",
"paintcurve",
"ptcache",
"dpaint",
# Algorithm/library names
"ashikhmin", # Ashikhmin-Shirley
"arsloe", # Texel-Marsen-Arsloe
"beckmann",
"blackman", # Blackman-Harris
"blosc",
"burley", # Christensen-Burley
"catmull",
"catrom",
"chebychev",
"conrady", # Brown-Conrady
"courant",
"cryptomatte", "crypto",
"embree",
"gmp",
"hosek",
"kutta",
"lennard",
"marsen", # Texel-Marsen-Arsloe
"mikktspace",
"minkowski",
"minnaert",
"moskowitz", # Pierson-Moskowitz
"musgrave",
"nayar",
"netravali",
"nishita",
"ogawa",
"oren",
"peucker", # Ramer-Douglas-Peucker
"pierson", # Pierson-Moskowitz
"preetham",
"prewitt",
"ramer", # Ramer-Douglas-Peucker
"runge",
"sobol",
"verlet",
"wilkie",
"worley",
# Acronyms
"aa", "msaa",
"ao",
"api",
"apic", # Affine Particle-In-Cell
"asc", "cdl",
"ascii",
"atrac",
"avx",
"bsdf",
"bssrdf",
"bw",
"ccd",
"cmd",
"cmos",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fk",
"fov",
"fft",
"futura",
"fx",
"gfx",
"ggx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr", "hdri", "hdris",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva", "hsl",
"id",
"ies",
"ior",
"itu",
"jonswap",
"lhs",
"lmb", "mmb", "rmb",
"kb",
"mocap",
"msgid", "msgids",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rdp",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"ssao",
"ssr",
"svn",
"tma",
"ui",
"unix",
"vbo", "vbos",
"vr",
"wxyz",
"xr",
"ycc", "ycca",
"yrgb",
"yuv", "yuva",
# Blender acronyms
"bge",
"bli",
"bpy",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dwaa",
"dwab",
"dxf",
"eps",
"exr",
"fbx",
"fbxnode",
"ffmpeg",
"flac",
"gltf",
"gzip",
"ico",
"jpg", "jpeg", "jpegs",
"json",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png", "pngs",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"vp9",
"wav",
"webm",
"xiph",
"xml",
"xna",
"xvid",
}
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_split_words = re.compile(_valid_words).findall
@classmethod
def split_words(cls, text):
return [w for w in cls._split_words(text) if w]
def __init__(self, settings, lang="en_US"):
self.settings = settings
self.dict_spelling = enchant.Dict(lang)
self.cache = set(self.uimsgs)
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'rb') as f:
self.cache |= set(pickle.load(f))
def __del__(self):
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'wb') as f:
pickle.dump(self.cache, f)
def check(self, txt):
ret = []
if txt in self.cache:
return ret
for w in self.split_words(txt):
w_lower = w.lower()
if w_lower in self.cache:
continue
if not self.dict_spelling.check(w):
ret.append((w, self.dict_spelling.suggest(w)))
else:
self.cache.add(w_lower)
if not ret:
self.cache.add(txt)
return ret
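

# --- Usage sketch (illustrative; not part of the original module) ---
# SpellChecker needs a settings object exposing a SPELL_CACHE path (None here to
# skip the on-disk cache) and a working pyenchant "en_US" dictionary. The
# `_Settings` stub is a hypothetical stand-in for Blender's i18n settings object.
if __name__ == "__main__":
    class _Settings:
        SPELL_CACHE = None

    checker = SpellChecker(_Settings())
    print(checker.check("Renderable viewport with a speling mistake"))
    # -> [('speling', ['spelling', ...])]  (suggestions depend on the dictionary)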
#!/usr/bin/env python
"""
Example script to register two volumes with VoxelMorph models.
Please make sure to use trained models appropriately. Let's say we have a model trained to register
a scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run:
register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.pt
--moved moved.nii.gz --warp warp.nii.gz
The source and target input images are expected to be affinely registered.
If you use this code, please cite the following, and read function docs for further info/citations
VoxelMorph: A Learning Framework for Deformable Medical Image Registration
G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca.
IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019.
or
Unsupervised Learning for Probabilistic Diffeomorphic Registration for Images and Surfaces
A.V. Dalca, G. Balakrishnan, J. Guttag, M.R. Sabuncu.
MedIA: Medical Image Analysis. (57). pp 226-236, 2019
Copyright 2020 Adrian V. Dalca
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under
the License.
"""
import os
import argparse
import matplotlib.pyplot as plt
# third party
import numpy as np
import nibabel as nib
import torch
from scipy.interpolate import RegularGridInterpolator
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian
# import voxelmorph with sphere backend
os.environ['VXM_BACKEND'] = 'sphere'
import voxelmorph as vxm # nopep8
import math
# parse commandline args
parser = argparse.ArgumentParser()
parser.add_argument('--moving', required=True, help='moving image (source) filename')
parser.add_argument('--fixed', required=True, help='fixed image (target) filename')
parser.add_argument('--moved', help='warped image output filename')
parser.add_argument('--model', required=True, help='pytorch model for nonlinear registration')
# parser.add_argument('--normalize_type', default='std', help='select the data normalization processing type')
parser.add_argument('--warp', help='output warp deformation filename')
parser.add_argument('--sphere_sub', help='sphere_sub image filename')
parser.add_argument('--sphere_atlas', help='sphere_atlas image filename')
parser.add_argument('--sphere_reg', help='sphere.reg image output filename')
parser.add_argument('--sulc_sub', help='sulc_sub image filename')
parser.add_argument('--sulc_atlas', help='sulc_atlas image filename')
parser.add_argument('--sphere_freesurfer', help='sphere_freesurfer image filename')
parser.add_argument('--plot_image', help='show time image output filename')
parser.add_argument('--plot_image_dif_1', help='show dif image output filename')
parser.add_argument('--plot_image_dif_2', help='show dif image output filename')
parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used')
parser.add_argument('--multichannel', action='store_true',
help='specify that data has multiple channels')
args = parser.parse_args()
def meannormalize(sub_data):
mean = np.mean(sub_data)
std = np.std(sub_data)
norm = (sub_data - mean) / std
return norm, mean, std
def backmeannormalize(input, mean, std):
output = input * std + mean
return output
def minmaxnormalize(sub_data):
zeros = sub_data == 0
max = np.max(sub_data)
min = np.min(sub_data)
norm = (sub_data - min) / (max - min)
norm[zeros] = 0
return norm
def backminmaxnormalize(input, max, min):
output = input * (max - min) + min
return output
def domainnorm(sub_data):
domain = 33
norm = sub_data / domain
return norm
def backdomainnorm(sub_data):
domain = 33
output = sub_data * domain
return output
# def normalize_forword(data, type="std"):
# if type == "std":
# return meannormalize(data)
# elif type == "min_max":
# return minmaxnormalize(data)
# else:
# raise KeyError("type is error")
#
# def normalize_backword(data, a, b, type="std"):
# if type == "std":
# return backmeannormalize(data, a, b)
# elif type == "min_max":
# return backminmaxnormalize(data, a, b)
# else:
# raise KeyError("type is error")
def interpolate(warp_file, lh_sphere):
x = np.linspace(-128, 128, 256) # phi ###
y = np.linspace(0, 512, 512) # theta ###
# print(warp_file.files)
warp = warp_file.squeeze()
warp = warp.permute(0, 2, 1)
warp = warp.detach().numpy()
# warp = warp_file['vol']
# warp = np.moveaxis(warp, 1, -1)
interpolate_function_x = RegularGridInterpolator((x, y), -warp[0]) # x-axis
interpolate_function_y = RegularGridInterpolator((x, y), -warp[1]) # y-axis
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
p = phi.degree
t = theta.degree
theta_bins = 512
phi_bins = 256
theta_width = math.degrees(2 * np.pi) / theta_bins
t /= theta_width
phi_width = math.degrees(np.pi) / phi_bins
p /= phi_width
t = t.reshape(-1, 1)
p = p.reshape(-1, 1)
pts = np.concatenate((p, t), axis=1)
new_pts_x = interpolate_function_x(pts)
new_pts_y = interpolate_function_y(pts)
x_prime = pts.T[0] + new_pts_x
y_prime = pts.T[1] + new_pts_y
x_prime *= phi_width
y_prime *= theta_width
y_prime = np.clip(y_prime, 0, 360)
x_prime = np.clip(x_prime, -90, 90)
t_prime = [math.radians(i) for i in y_prime]
p_prime = [math.radians(i) for i in x_prime]
t_prime = np.array(t_prime)
p_prime = np.array(p_prime)
return r, p_prime, t_prime
# save 4 image
def save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename):
lh_morph_sulc_sub = nib.freesurfer.read_morph_data(lh_sulc_sub)
lh_morph_sulc_atlas = nib.freesurfer.read_morph_data(lh_sulc_atlas)
coords_sub, faces_sub = nib.freesurfer.read_geometry(lh_sphere_sub)
r_sub, phi_sub, theta_sub = cartesian_to_spherical(coords_sub[:, 0], coords_sub[:, 1], coords_sub[:, 2])
coords_atlas, faces_atlas = nib.freesurfer.read_geometry(lh_sphere_atlas)
r_atlas, phi_atlas, theta_atlas = cartesian_to_spherical(coords_atlas[:, 0], coords_atlas[:, 1], coords_atlas[:, 2])
coords_freesurfer, faces_freesurfer = nib.freesurfer.read_geometry(lh_sphere_freesurfer)
r_reg, phi_reg, theta_reg = cartesian_to_spherical(coords_freesurfer[:, 0], coords_freesurfer[:, 1],
coords_freesurfer[:, 2])
fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot(141)
ax.scatter(phi_sub.degree, theta_sub.degree, s=0.1,
c=lh_morph_sulc_sub) # phi.degree: [-90, 90], theta.degree: [0, 360]
plt.title('Moving')
ax = fig.add_subplot(142)
ax.scatter(phi_atlas.degree, theta_atlas.degree, s=0.1, c=lh_morph_sulc_atlas)
plt.title('Fixed')
ax = fig.add_subplot(143)
phi_prime = [math.degrees(p) for p in phi_prime]
    theta_prime = [math.degrees(t) for t in theta_prime]
    ax.scatter(phi_prime, theta_prime, s=0.1, c=lh_morph_sulc_sub)  # (256, 512)
plt.title('Moved')
ax = fig.add_subplot(144)
ax.scatter(phi_reg.degree, theta_reg.degree, s=0.1, c=lh_morph_sulc_sub) # (256, 512)
plt.title('Moved FreeSurfer')
plt.savefig(imagesavefilename)
def xyz2degree(lh_sphere, lh_sulc):
# coords: return (x, y, z) coordinates
# faces: defining mesh triangles
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
# (r: radius, phi: latitude, theta: longitude) in radians
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
lat = phi.degree + 90
lon = theta.degree
# resize to (512, 256)
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
# load curv and sulc info
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
# values store [theta, phi, sulc value, curv value]
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
# values[1, ys, xs] = lh_morph_curv
return values
def xyz2degree2(phi, theta, lh_sulc):
lat = phi + 90
lon = theta
# resize to (512, 256)
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
# load curv and sulc info
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
# values store [theta, phi, sulc value, curv value]
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
# values[1, ys, xs] = lh_morph_curv
return values
# device handling
if args.gpu and (args.gpu != '-1'):
device = 'cuda'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
else:
device = 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# load moving and fixed images
add_feat_axis = not args.multichannel
moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis)
fixed, fixed_affine = vxm.py.utils.load_volfile(
args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True)
# load and set up model
model = vxm.networks.VxmDense.load(args.model, device)
model.to(device)
model.eval()
# set up normalize type
# normalize_type = args.normalize_type
# normalize_type = "min_max"
# set up tensors and permute
# moving, a_moving, b_moving = normalize_forword(moving, type=normalize_type)
# fixed, a_fixed, b_fixed = normalize_forword(fixed, type=normalize_type)
# moving = domainnorm(moving)
moving = minmaxnormalize(moving)
fixed = minmaxnormalize(fixed)
input_moving = torch.from_numpy(moving).to(device).float().permute(0, 3, 1, 2)
input_fixed = torch.from_numpy(fixed).to(device).float().permute(0, 3, 1, 2)
# predict
moved, warp = model(input_moving, input_fixed, registration=True)
# moved = normalize_backword(moved, a_moving, b_moving, type=normalize_type)
# moved = backdomainnorm(moved)
if args.sphere_sub:
c, faces = nib.freesurfer.read_geometry(args.sphere_sub)
coords = np.empty(shape=c.shape)
r, phi_prime, theta_prime = interpolate(warp, args.sphere_sub)
coords[:, 0], coords[:, 1], coords[:, 2] = spherical_to_cartesian(r, phi_prime, theta_prime)
nib.freesurfer.io.write_geometry(args.sphere_reg, coords, faces)
if args.plot_image:
lh_sphere_sub = args.sphere_sub
lh_sphere_atlas = args.sphere_atlas
lh_sulc_sub = args.sulc_sub
lh_sulc_atlas = args.sulc_atlas
lh_sphere_freesurfer = args.sphere_freesurfer
imagesavefilename = args.plot_image
save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename)
if args.plot_image_dif_1 or args.plot_image_dif_2:
imagesavefilenamedif_1 = args.plot_image_dif_1
imagesavefilenamedif_2 = args.plot_image_dif_2
dif_moving = xyz2degree(lh_sphere_sub, lh_sulc_sub)
dif_moved = xyz2degree2(phi_prime, theta_prime, lh_sulc_sub)
dif_freesurfer = xyz2degree(lh_sphere_freesurfer, lh_sulc_sub)
dif_moved_moving = dif_moved - dif_moving
print(np.nanmax(dif_moved_moving), np.nanmin(dif_moved_moving), np.nanmean(dif_moved_moving))
dif_freesurfer_moved = dif_freesurfer - dif_moved
plt.figure(figsize=(14, 7))
plt.imshow(dif_moved_moving)
plt.title('moved_moving')
plt.colorbar()
plt.savefig(imagesavefilenamedif_1)
plt.figure(figsize=(14, 7))
plt.imshow(dif_freesurfer_moved)
plt.title('freesurfer_moved')
plt.colorbar()
plt.savefig(imagesavefilenamedif_2)
# save moved image
if args.moved:
moved = moved.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(moved, args.moved, fixed_affine)
# save warp
if args.warp:
warp = warp.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(warp, args.warp, fixed_affine)
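# --- Example invocation (illustrative; every file name below is a placeholder) ---
# python register.py \
#     --moving moving.nii.gz --fixed fixed.nii.gz --model model.pt \
#     --moved moved.nii.gz --warp warp.nii.gz \
#     --sphere_sub lh.sphere --sphere_atlas lh.atlas.sphere --sphere_reg lh.sphere.reg \
#     --sulc_sub lh.sulc --sulc_atlas lh.atlas.sulc --sphere_freesurfer lh.fs.sphere.reg \
#     --plot_image compare.png --plot_image_dif_1 dif_moved_moving.png \
#     --plot_image_dif_2 dif_freesurfer_moved.png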
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.algorithms.image import estimate_image_hierarchical
from pymor.algorithms.projection import project, project_to_subbasis
from pymor.core.base import BasicObject
from pymor.core.exceptions import ImageCollectionError
from pymor.operators.constructions import ZeroOperator
from pymor.operators.interface import Operator
class ResidualReductor(BasicObject):
"""Generic reduced basis residual reductor.
Given an operator and a right-hand side, the residual is given by::
residual.apply(U, mu) == operator.apply(U, mu) - rhs.as_range_array(mu)
When operator maps to functionals instead of vectors, we are interested in the Riesz
representative of the residual::
residual.apply(U, mu)
== product.apply_inverse(operator.apply(U, mu) - rhs.as_range_array(mu))
Given a basis `RB` of a subspace of the source space of `operator`, this reductor
uses :func:`~pymor.algorithms.image.estimate_image_hierarchical` to determine
a low-dimensional subspace containing the image of the subspace under
`residual` (resp. `riesz_residual`), computes an orthonormal basis
`residual_range` for this range space and then returns the Petrov-Galerkin projection ::
projected_residual
== project(residual, range_basis=residual_range, source_basis=RB)
of the residual operator. Given a reduced basis coefficient vector `u`, w.r.t.
`RB`, the (dual) norm of the residual can then be computed as ::
projected_residual.apply(u, mu).norm()
Moreover, a `reconstruct` method is provided such that ::
residual_reductor.reconstruct(projected_residual.apply(u, mu))
== residual.apply(RB.lincomb(u), mu)
Parameters
----------
RB
|VectorArray| containing a basis of the reduced space onto which to project.
operator
See definition of `residual`.
rhs
See definition of `residual`. If `None`, zero right-hand side is assumed.
product
Inner product |Operator| w.r.t. which to orthonormalize and w.r.t. which to
compute the Riesz representatives in case `operator` maps to functionals.
riesz_representatives
If `True` compute the Riesz representative of the residual.
"""
def __init__(self, RB, operator, rhs=None, product=None, riesz_representatives=False):
assert RB in operator.source
assert rhs is None \
or (rhs.source.is_scalar and rhs.range == operator.range and rhs.linear)
assert product is None or product.source == product.range == operator.range
self.__auto_init(locals())
self.residual_range = operator.range.empty()
self.residual_range_dims = []
def reduce(self):
if self.residual_range is not False:
with self.logger.block('Estimating residual range ...'):
try:
self.residual_range, self.residual_range_dims = \
estimate_image_hierarchical([self.operator], [self.rhs],
self.RB,
(self.residual_range, self.residual_range_dims),
orthonormalize=True, product=self.product,
riesz_representatives=self.riesz_representatives)
except ImageCollectionError as e:
self.logger.warning(f'Cannot compute range of {e.op}. Evaluation will be slow.')
self.residual_range = False
if self.residual_range is False:
operator = project(self.operator, None, self.RB)
return NonProjectedResidualOperator(operator, self.rhs, self.riesz_representatives, self.product)
with self.logger.block('Projecting residual operator ...'):
if self.riesz_representatives:
operator = project(self.operator, self.residual_range, self.RB, product=None) # the product cancels out
rhs = project(self.rhs, self.residual_range, None, product=None)
else:
operator = project(self.operator, self.residual_range, self.RB, product=self.product)
rhs = project(self.rhs, self.residual_range, None, product=self.product)
return ResidualOperator(operator, rhs)
def reconstruct(self, u):
"""Reconstruct high-dimensional residual vector from reduced vector `u`."""
if self.residual_range is False:
if self.product:
return u * (u.norm() / u.norm(self.product))[0]
else:
return u
else:
return self.residual_range[:u.dim].lincomb(u.to_numpy())
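# --- Usage sketch (illustrative; not part of pyMOR; `fom`, `RB`, `u` and `mu`
# are assumed to come from the surrounding reduced-basis workflow) ---
#
#     reductor = ResidualReductor(RB, fom.operator, fom.rhs,
#                                 product=fom.h1_0_semi_product,
#                                 riesz_representatives=True)
#     residual = reductor.reduce()
#     est = residual.apply(u, mu=mu).norm()  # dual norm of the residual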
class ResidualOperator(Operator):
"""Instantiated by :class:`ResidualReductor`."""
def __init__(self, operator, rhs, name=None):
self.__auto_init(locals())
self.source = operator.source
self.range = operator.range
self.linear = operator.linear
self.rhs_vector = rhs.as_range_array() if rhs and not rhs.parametric else None
def apply(self, U, mu=None):
V = self.operator.apply(U, mu=mu)
if self.rhs:
F = self.rhs_vector or self.rhs.as_range_array(mu)
if len(V) > 1:
V -= F[[0]*len(V)]
else:
V -= F
return V
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return ResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),
project_to_subbasis(self.rhs, dim_range, None),
name=name)
class NonProjectedResidualOperator(ResidualOperator):
"""Instantiated by :class:`ResidualReductor`.
Not to be used directly.
"""
def __init__(self, operator, rhs, riesz_representatives, product):
super().__init__(operator, rhs)
self.__auto_init(locals())
def apply(self, U, mu=None):
R = super().apply(U, mu=mu)
if self.product:
if self.riesz_representatives:
R_riesz = self.product.apply_inverse(R)
# divide by norm, except when norm is zero:
inversel2 = 1./R_riesz.norm()
inversel2 = np.nan_to_num(inversel2)
R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)
return R_riesz
else:
# divide by norm, except when norm is zero:
inversel2 = 1./R.norm()
inversel2 = np.nan_to_num(inversel2)
R.scal(np.sqrt(self.product.pairwise_apply2(R, R)) * inversel2)
return R
else:
return R
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return self.with_(operator=project_to_subbasis(self.operator, None, dim_source))
class ImplicitEulerResidualReductor(BasicObject):
"""Reduced basis residual reductor with mass operator for implicit Euler timestepping.
Given an operator, mass and a functional, the concatenation of residual operator
with the Riesz isomorphism is given by::
riesz_residual.apply(U, U_old, mu)
== product.apply_inverse(operator.apply(U, mu) + 1/dt*mass.apply(U, mu)
- 1/dt*mass.apply(U_old, mu) - rhs.as_vector(mu))
This reductor determines a low-dimensional subspace of the image of a reduced basis space under
`riesz_residual` using :func:`~pymor.algorithms.image.estimate_image_hierarchical`, computes an
orthonormal basis `residual_range` of this range space and then returns the Petrov-Galerkin
projection ::
projected_riesz_residual
== riesz_residual.projected(range_basis=residual_range, source_basis=RB)
of the `riesz_residual` operator. Given reduced basis coefficient vectors `u` and `u_old`,
the dual norm of the residual can then be computed as ::
projected_riesz_residual.apply(u, u_old, mu).norm()
Moreover, a `reconstruct` method is provided such that ::
residual_reductor.reconstruct(projected_riesz_residual.apply(u, u_old, mu))
== riesz_residual.apply(RB.lincomb(u), RB.lincomb(u_old), mu)
Parameters
----------
operator
See definition of `riesz_residual`.
mass
The mass operator. See definition of `riesz_residual`.
dt
The time step size. See definition of `riesz_residual`.
rhs
See definition of `riesz_residual`. If `None`, zero right-hand side is assumed.
RB
|VectorArray| containing a basis of the reduced space onto which to project.
product
Inner product |Operator| w.r.t. which to compute the Riesz representatives.
"""
def __init__(self, RB, operator, mass, dt, rhs=None, product=None):
assert RB in operator.source
assert rhs.source.is_scalar and rhs.range == operator.range and rhs.linear
assert product is None or product.source == product.range == operator.range
self.__auto_init(locals())
self.residual_range = operator.range.empty()
self.residual_range_dims = []
def reduce(self):
if self.residual_range is not False:
with self.logger.block('Estimating residual range ...'):
try:
self.residual_range, self.residual_range_dims = \
estimate_image_hierarchical([self.operator, self.mass], [self.rhs],
self.RB,
(self.residual_range, self.residual_range_dims),
orthonormalize=True, product=self.product,
riesz_representatives=True)
except ImageCollectionError as e:
self.logger.warning(f'Cannot compute range of {e.op}. Evaluation will be slow.')
self.residual_range = False
if self.residual_range is False:
operator = project(self.operator, None, self.RB)
mass = project(self.mass, None, self.RB)
return NonProjectedImplicitEulerResidualOperator(operator, mass, self.rhs, self.dt, self.product)
with self.logger.block('Projecting residual operator ...'):
# the product always cancels out
operator = project(self.operator, self.residual_range, self.RB, product=None)
mass = project(self.mass, self.residual_range, self.RB, product=None)
rhs = project(self.rhs, self.residual_range, None, product=None)
return ImplicitEulerResidualOperator(operator, mass, rhs, self.dt)
def reconstruct(self, u):
"""Reconstruct high-dimensional residual vector from reduced vector `u`."""
if self.residual_range is False:
if self.product:
return u * (u.norm() / u.norm(self.product))[0]
else:
return u
else:
return self.residual_range[:u.dim].lincomb(u.to_numpy())
class ImplicitEulerResidualOperator(Operator):
"""Instantiated by :class:`ImplicitEulerResidualReductor`."""
def __init__(self, operator, mass, rhs, dt, name=None):
self.__auto_init(locals())
self.source = operator.source
self.range = operator.range
self.linear = operator.linear
self.rhs_vector = rhs.as_range_array() if not rhs.parametric else None
def apply(self, U, U_old, mu=None):
V = self.operator.apply(U, mu=mu)
V.axpy(1./self.dt, self.mass.apply(U, mu=mu))
V.axpy(-1./self.dt, self.mass.apply(U_old, mu=mu))
if not isinstance(self.rhs, ZeroOperator):
F = self.rhs_vector or self.rhs.as_range_array(mu)
if len(V) > 1:
V -= F[[0]*len(V)]
else:
V -= F
return V
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return ImplicitEulerResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),
project_to_subbasis(self.mass, dim_range, dim_source),
project_to_subbasis(self.rhs, dim_range, None),
self.dt,
name=name)
class NonProjectedImplicitEulerResidualOperator(ImplicitEulerResidualOperator):
"""Instantiated by :class:`ImplicitEulerResidualReductor`.
Not to be used directly.
"""
def __init__(self, operator, mass, rhs, dt, product):
super().__init__(operator, mass, rhs, dt)
self.product = product
def apply(self, U, U_old, mu=None):
R = super().apply(U, U_old, mu=mu)
if self.product:
R_riesz = self.product.apply_inverse(R)
# divide by norm, except when norm is zero:
inversel2 = 1./R_riesz.norm()
inversel2 = np.nan_to_num(inversel2)
R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)
return R_riesz
else:
return R
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return self.with_(operator=project_to_subbasis(self.operator, None, dim_source),
mass=project_to_subbasis(self.mass, None, dim_source))
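# Illustrative usage sketch (an addition, not part of the original module):
# `fom`, `RB` and `mu` are assumed names -- any pyMOR model exposing `operator`
# and `rhs`, a |VectorArray| reduced basis, and a parameter value.
def _residual_estimator_sketch(fom, RB, mu, product=None):
    """Project the residual once, then evaluate its (dual) norm for parameter `mu`."""
    reductor = ResidualReductor(RB, fom.operator, fom.rhs,
                                product=product, riesz_representatives=True)
    residual = reductor.reduce()
    # reduced coefficients of some reduced solution; all ones purely for illustration
    u = residual.source.ones()
    return residual.apply(u, mu=mu).norm()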
| [
[
[
231,
242
],
[
6548,
6550
],
[
6602,
6604
],
[
6827,
6829
],
[
6875,
6877
],
[
13295,
13297
],
[
13345,
13347
]
],
[
[
279,
306
],
[
3205,
3232
],
[
9684,
9711
]
],
[
[
347,
354
],
[
3885,
3892
],
[
4172,
4179
],
[
4288,
4295
],
[
4392,
4399
],
[
4489,
4496
],
[
10353,
10360
],
[
10410,
10417
],
[
10691,
10698
],
[
10777,
10784
],
[
10858,
10865
]
],
[
[
356,
375
],
[
5747,
5766
],
[
5838,
5857
],
[
7109,
7128
],
[
12327,
12346
],
[
12431,
12450
],
[
12531,
12550
],
[
13572,
13591
],
[
13657,
13676
]
],
[
[
404,
415
],
[
598,
609
],
[
7201,
7212
]
],
[
[
450,
470
],
[
3644,
3664
],
[
10112,
10132
]
],
[
[
513,
525
],
[
12002,
12014
]
],
[
[
564,
572
],
[
5004,
5012
],
[
11406,
11414
]
],
[
[
581,
597
]
],
[
[
4987,
5003
],
[
5966,
5982
],
[
4572,
4588
],
[
5730,
5746
]
],
[
[
5937,
5965
],
[
3942,
3970
]
],
[
[
7171,
7200
]
],
[
[
11376,
11405
],
[
12739,
12768
],
[
10933,
10962
],
[
12297,
12326
]
],
[
[
12697,
12738
],
[
10463,
10504
]
]
] |
import logging
import hmac
from hashlib import sha256
import os
import urllib
from datetime import datetime
log = logging.getLogger(__name__)
# This warning is stupid
# pylint: disable=logging-fstring-interpolation
def prepend_bucketname(name):
prefix = os.getenv('BUCKETNAME_PREFIX', "gsfc-ngap-{}-".format(os.getenv('MATURITY', 'DEV')[0:1].lower()))
return "{}{}".format(prefix, name)
def hmacsha256(key, string):
return hmac.new(key, string.encode('utf-8'), sha256)
def get_presigned_url(session, bucket_name, object_name, region_name, expire_seconds, user_id, method='GET'):
timez = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
datez = timez[:8]
hostname = "{0}.s3{1}.amazonaws.com".format(bucket_name, "."+region_name if region_name != "us-east-1" else "")
cred = session['Credentials']['AccessKeyId']
secret = session['Credentials']['SecretAccessKey']
token = session['Credentials']['SessionToken']
aws4_request = "/".join([datez, region_name, "s3", "aws4_request"])
cred_string = "{0}/{1}".format(cred, aws4_request)
# Canonical Query String Parts
parts = ["A-userid={0}".format(user_id),
"X-Amz-Algorithm=AWS4-HMAC-SHA256",
"X-Amz-Credential="+urllib.parse.quote_plus(cred_string),
"X-Amz-Date="+timez,
"X-Amz-Expires={0}".format(expire_seconds),
"X-Amz-Security-Token="+urllib.parse.quote_plus(token),
"X-Amz-SignedHeaders=host"]
can_query_string = "&".join(parts)
    # Canonical Request
can_req = method + "\n/" + object_name + "\n" + can_query_string + "\nhost:" + hostname + "\n\nhost\nUNSIGNED-PAYLOAD"
can_req_hash = sha256(can_req.encode('utf-8')).hexdigest()
# String to Sign
stringtosign = "\n".join(["AWS4-HMAC-SHA256", timez, aws4_request, can_req_hash])
# Signing Key
StepOne = hmacsha256( "AWS4{0}".format(secret).encode('utf-8'), datez).digest()
StepTwo = hmacsha256( StepOne, region_name ).digest()
StepThree = hmacsha256( StepTwo, "s3").digest()
SigningKey = hmacsha256( StepThree, "aws4_request").digest()
# Final Signature
Signature = hmacsha256(SigningKey, stringtosign).hexdigest()
# Dump URL
url = "https://" + hostname + "/" + object_name + "?" + can_query_string + "&X-Amz-Signature=" + Signature
return url
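# Illustrative usage sketch (an addition, not part of the original module).
# `fake_session` mirrors the dict returned by STS AssumeRole; all values are
# placeholders, so the produced URL is only structurally valid.
def _example_presigned_url():
    fake_session = {'Credentials': {'AccessKeyId': 'AKIDEXAMPLE',
                                    'SecretAccessKey': 'FAKE/SECRET/KEY',
                                    'SessionToken': 'FAKE-SESSION-TOKEN'}}
    return get_presigned_url(fake_session, 'my-data-bucket', 'path/to/granule.h5',
                             'us-west-2', 3600, 'urs-user-id')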
def get_bucket_dynamic_path(path_list, b_map):
# Old and REVERSE format has no 'MAP'. In either case, we don't want it fouling our dict.
if 'MAP' in b_map:
map_dict = b_map['MAP']
else:
map_dict = b_map
mapping = []
log.debug("Pathparts is {0}".format(", ".join(path_list)))
# walk the bucket map to see if this path is valid
for path_part in path_list:
# Check if we hit a leaf of the YAML tree
if (mapping and isinstance(map_dict, str)) or 'bucket' in map_dict: #
customheaders = {}
if isinstance(map_dict, dict) and 'bucket' in map_dict:
bucketname = map_dict['bucket']
if 'headers' in map_dict:
customheaders = map_dict['headers']
else:
bucketname = map_dict
log.debug(f'mapping: {mapping}')
# Pop mapping off path_list
for _ in mapping:
path_list.pop(0)
# Join the remaining bits together to form object_name
object_name = "/".join(path_list)
bucket_path = "/".join(mapping)
log.info("Bucket mapping was {0}, object was {1}".format(bucket_path, object_name))
return prepend_bucketname(bucketname), bucket_path, object_name, customheaders
if path_part in map_dict:
map_dict = map_dict[path_part]
mapping.append(path_part)
log.debug("Found {0}, Mapping is now {1}".format(path_part, "/".join(mapping)))
else:
log.warning("Could not find {0} in bucketmap".format(path_part))
log.debug('said bucketmap: {}'.format(map_dict))
return False, False, False, {}
# what? No path?
return False, False, False, {}
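# Illustrative usage sketch (an addition, not part of the original module).
# The bucket map below is hypothetical; clearing BUCKETNAME_PREFIX keeps the
# returned bucket name literal instead of environment-dependent.
def _example_bucket_path_lookup():
    os.environ['BUCKETNAME_PREFIX'] = ''
    b_map = {'MAP': {'SCIENCE': {'GRANULES': {'bucket': 'science-granules',
                                              'headers': {'x-archive': 'true'}}}}}
    # -> ('science-granules', 'SCIENCE/GRANULES', 'file.h5', {'x-archive': 'true'})
    return get_bucket_dynamic_path(['SCIENCE', 'GRANULES', 'file.h5'], b_map)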
def process_varargs(varargs: list, b_map: dict):
"""
wrapper around process_request that returns legacy values to preserve backward compatibility
:param varargs: a list with the path to the file requested.
:param b_map: bucket map
:return: path, bucket, object_name
"""
log.warning('Deprecated process_varargs() called.')
path, bucket, object_name, _ = process_request(varargs, b_map)
return path, bucket, object_name
def process_request(varargs, b_map):
varargs = varargs.split("/")
# Make sure we got at least 1 path, and 1 file name:
if len(varargs) < 2:
return "/".join(varargs), None, None, []
# Watch for ASF-ish reverse URL mapping formats:
if len(varargs) == 3:
if os.getenv('USE_REVERSE_BUCKET_MAP', 'FALSE').lower() == 'true':
varargs[0], varargs[1] = varargs[1], varargs[0]
# Look up the bucket from path parts
bucket, path, object_name, headers = get_bucket_dynamic_path(varargs, b_map)
# If we didn't figure out the bucket, we don't know the path/object_name
if not bucket:
object_name = varargs.pop(-1)
path = "/".join(varargs)
return path, bucket, object_name, headers
def bucket_prefix_match(bucket_check, bucket_map, object_name=""):
log.debug(f"bucket_prefix_match(): checking if {bucket_check} matches {bucket_map} w/ optional obj '{object_name}'")
if bucket_check == bucket_map.split('/')[0] and object_name.startswith("/".join(bucket_map.split('/')[1:])):
log.debug(f"Prefixed Bucket Map matched: s3://{bucket_check}/{object_name} => {bucket_map}")
return True
return False
# Sort public/private buckets such that object-prefixes are processed FIRST
def get_sorted_bucket_list(b_map, bucket_group):
if bucket_group not in b_map:
# But why?!
log.warning(f"Bucket map does not contain bucket group '{bucket_group}'")
return []
# b_map[bucket_group] SHOULD be a dict, but list actually works too.
if isinstance(b_map[bucket_group], dict):
return sorted(list(b_map[bucket_group].keys()), key=lambda e: e.count("/"), reverse=True )
if isinstance(b_map[bucket_group], list):
return sorted(list(b_map[bucket_group]), key=lambda e: e.count("/"), reverse=True )
# Something went wrong.
return []
def check_private_bucket(bucket, b_map, object_name=""):
log.debug('check_private_buckets(): bucket: {}'.format(bucket))
# Check public bucket file:
if 'PRIVATE_BUCKETS' in b_map:
# Prioritize prefixed buckets first, the deeper the better!
sorted_buckets = get_sorted_bucket_list(b_map, 'PRIVATE_BUCKETS')
log.debug(f"Sorted PRIVATE buckets are {sorted_buckets}")
for priv_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(priv_bucket), object_name):
# This bucket is PRIVATE, return group!
return b_map['PRIVATE_BUCKETS'][priv_bucket]
return False
def check_public_bucket(bucket, b_map, object_name=""):
# Check for PUBLIC_BUCKETS in bucket map file
if 'PUBLIC_BUCKETS' in b_map:
sorted_buckets = get_sorted_bucket_list(b_map, 'PUBLIC_BUCKETS')
log.debug(f"Sorted PUBLIC buckets are {sorted_buckets}")
for pub_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(pub_bucket), object_name):
# This bucket is public!
log.debug("found a public, we'll take it")
return True
# Did not find this in public bucket list
log.debug('we did not find a public bucket for {}'.format(bucket))
return False
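# Illustrative usage sketch (an addition, not part of the original module).
# A hypothetical map with an object-prefixed public bucket; the deeper prefix
# 'data/public' is checked before plain bucket names.
def _example_public_bucket_check():
    os.environ['BUCKETNAME_PREFIX'] = ''
    b_map = {'PUBLIC_BUCKETS': {'browse': True, 'data/public': True}}
    # -> True, because bucket 'data' plus object 'public/granule.h5' matches 'data/public'
    return check_public_bucket('data', b_map, object_name='public/granule.h5')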
| [
[
[
7,
14
],
[
115,
122
]
],
[
[
22,
26
],
[
442,
446
]
],
[
[
47,
53
],
[
480,
486
],
[
1691,
1697
]
],
[
[
61,
63
],
[
262,
264
],
[
316,
318
],
[
4899,
4901
]
],
[
[
71,
77
],
[
1246,
1252
],
[
1412,
1418
]
],
[
[
99,
107
],
[
613,
621
]
],
[
[
109,
112
],
[
2616,
2619
],
[
3204,
3207
],
[
3511,
3514
],
[
3814,
3817
],
[
3921,
3924
],
[
3998,
4001
],
[
4447,
4450
],
[
5433,
5436
],
[
5671,
5674
],
[
5990,
5993
],
[
6546,
6549
],
[
6828,
6831
],
[
7377,
7380
],
[
7622,
7625
],
[
7744,
7747
]
],
[
[
222,
240
],
[
3614,
3632
],
[
6972,
6990
],
[
7519,
7537
]
],
[
[
405,
415
],
[
1879,
1889
],
[
1966,
1976
],
[
2027,
2037
],
[
2080,
2090
],
[
2168,
2178
]
],
[
[
494,
511
]
],
[
[
2365,
2388
],
[
5106,
5129
]
],
[
[
4153,
4168
]
],
[
[
4609,
4624
],
[
4534,
4549
]
],
[
[
5366,
5385
],
[
6944,
6963
],
[
7491,
7510
]
],
[
[
5883,
5905
],
[
6771,
6793
],
[
7321,
7343
]
],
[
[
6488,
6508
]
],
[
[
7159,
7178
]
]
] |
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_sources,get_articles
from ..models import Sources
#views
@main.route('/')
def index():
'''
    View function for the root page; returns the index page and its data.
'''
sources = get_sources('business')
sports_sources = get_sources('sports')
technology_sources = get_sources('technology')
entertainment_sources = get_sources('entertainment')
title = "News Of The Day"
return render_template('index.html',title = title, sources = sources,sports_sources = sports_sources,technology_sources = technology_sources,entertainment_sources = entertainment_sources)
@main.route('/sources/<id>')
def articles(id):
'''
view articles page
'''
articles = get_articles(id)
title = f'NH | {id}'
return render_template('articles.html',title= title,articles = articles) | [
[
[
18,
33
],
[
487,
502
],
[
806,
821
]
],
[
[
34,
41
]
],
[
[
42,
50
]
],
[
[
51,
58
]
],
[
[
73,
77
],
[
164,
168
],
[
670,
674
]
],
[
[
101,
112
],
[
284,
295
],
[
326,
337
],
[
370,
381
],
[
421,
432
]
],
[
[
113,
125
],
[
758,
770
]
],
[
[
147,
154
]
],
[
[
184,
189
]
],
[
[
702,
710
]
]
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields
from polyaxon_schemas.ml.layers.base import BaseLayerConfig, BaseLayerSchema
class WrapperSchema(BaseLayerSchema):
layer = fields.Nested('LayerSchema')
@staticmethod
def schema_config():
return WrapperConfig
class WrapperConfig(BaseLayerConfig):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.
Args:
layer: The layer to be wrapped.
"""
IDENTIFIER = 'Wrapper'
SCHEMA = WrapperSchema
def __init__(self, layer, **kwargs):
super(WrapperConfig, self).__init__(**kwargs)
self.layer = layer
class TimeDistributedSchema(WrapperSchema):
@staticmethod
def schema_config():
return TimeDistributedConfig
class TimeDistributedConfig(WrapperConfig):
"""This wrapper allows to apply a layer to every temporal slice of an input.
The input should be at least 3D, and the dimension of index one
will be considered to be the temporal dimension.
Consider a batch of 32 samples,
where each sample is a sequence of 10 vectors of 16 dimensions.
The batch input shape of the layer is then `(32, 10, 16)`,
and the `input_shape`, not including the samples dimension, is `(10, 16)`.
You can then use `TimeDistributed` to apply a `Dense` layer
to each of the 10 timesteps, independently:
```python
# as the first layer in a model
x = TimeDistributed(Dense(8))(x)
# now x.output_shape == (None, 10, 8)
```
The output will then have shape `(32, 10, 8)`.
In subsequent layers, there is no need for the `input_shape`:
```python
x = TimeDistributed(Dense(32))(x)
# now x.output_shape == (None, 10, 32)
```
The output will then have shape `(32, 10, 32)`.
`TimeDistributed` can be used with arbitrary layers, not just `Dense`,
for instance with a `Conv2D` layer:
```python
x = TimeDistributed(Conv2D(64, (3, 3)))(x)
```
Args:
layer: a layer instance.
Polyaxonfile usage:
```yaml
TimeDistributed:
layer:
Dense:
units: 2
```
"""
IDENTIFIER = 'TimeDistributed'
SCHEMA = TimeDistributedSchema
class BidirectionalSchema(WrapperSchema):
@staticmethod
def schema_config():
return BidirectionalConfig
class BidirectionalConfig(WrapperConfig):
"""Bidirectional wrapper for RNNs.
Args:
layer: `Recurrent` instance.
merge_mode: Mode by which outputs of the
forward and backward RNNs will be combined.
One of {'sum', 'mul', 'concat', 'ave', None}.
If None, the outputs will not be combined,
they will be returned as a list.
Raises:
ValueError: In case of invalid `merge_mode` argument.
Example:
```python
x = Bidirectional(plx.layers.LSTM(units=128, dropout=0.2, recurrent_dropout=0.2))(x)
```
Polyaxonfile usage:
```yaml
Bidirectional:
layer:
LSTM:
units: 128
dropout: 0.2
recurrent_dropout: 0.2
```
"""
IDENTIFIER = 'Bidirectional'
SCHEMA = BidirectionalSchema
| [
[
[
47,
62
]
],
[
[
64,
72
]
],
[
[
74,
88
]
],
[
[
114,
120
],
[
251,
257
]
],
[
[
166,
181
],
[
375,
390
]
],
[
[
183,
198
],
[
221,
236
]
],
[
[
207,
220
],
[
746,
759
],
[
913,
926
],
[
2482,
2495
]
],
[
[
361,
374
],
[
1039,
1052
],
[
2605,
2618
],
[
339,
352
],
[
816,
829
]
],
[
[
891,
912
],
[
2432,
2453
]
],
[
[
1017,
1038
],
[
987,
1008
]
],
[
[
2462,
2481
],
[
3395,
3414
]
],
[
[
2585,
2604
],
[
2557,
2576
]
]
] |
import pytest
from pages.aplication import Application
def pytest_addoption(parser):
parser.addoption('--browser_name', action='store', default="chrome", help="Choose browser: chrome or firefox")
parser.addoption('--base_url', action='store', default='https://prodoctorov.ru/new/rate/doctor/12/'
, help="Choose base_url")
@pytest.fixture
def app(request):
    browser_name = request.config.getoption("--browser_name") # read from the command line to choose the browser
base_url = request.config.getoption("--base_url")
fixture = Application(browser_name=browser_name, base_url=base_url)
yield fixture
print("\nquit browser..")
fixture.destroy()
return fixture
| [
[
[
7,
13
],
[
356,
362
]
],
[
[
43,
54
],
[
571,
582
]
],
[
[
61,
77
]
],
[
[
375,
378
]
]
] |
class optimType:
REACTION_KO = 1
REACTION_UO = 2
GENE_KO = 3
GENE_UO = 4
MEDIUM = 5
MEDIUM_LEVELS = 6
MEDIUM_REACTION_KO = 7
MEDIUM_REACTION_UO = 8
COMPOSITION = 9
PROTEIN_KO = 10
PROTEIN_UO = 11
types = {1:"Reaction Knockouts",2:"Reaction Under/Over expression", 3:"Gene Knockouts",
4:"Gene Under/Over expression", 5:"Medium compositions",6:"Medium compositions with levels",
7:"Medium with Reaction Knockouts",8: "Medium with Reaction Under/Over expression",
9:"Community Composition", 10:"Protein knockouts", 11:"Protein Under/Over expression"}
def get_optim_type_name(self, id):
return optimType.types.get(id)
class solverMethod:
LSODA = 1
LSODAR = 2
LSODE = 3
HEUN = 4
EULER = 5
RK4 = 6
DORMAN_PRINCE = 7
RKFehlberg = 8
Dopri5 = 9
Dop853 = 10
Vode = 11
Radau5 = 12
AdamsBashforth2=13
AdamsBashMoulton2=14
methods ={1:"LSODA",2:"LSODAR", 3: "LSODE", 4: "HEUN", 5: "EULER",
6: "Range Kutta 4", 7: "DORMAN PRINCE", 8: "RKFehlberg", 9: "Dopri5", 10: "Dop853", 11: "Vode",
12: "Radau5", 13: "AdamsBashforth2", 14: "AdamsBashMoulton2"
}
def get_solver_method_name(self, id):
return solverMethod.methods.get(id)
class solverStatus:
'''
    Enumeration of possible solution statuses (from FRAMED).
'''
OPTIMAL = 1
UNKNOWN = 0
ERROR = 2
SUBOPTIMAL = -1
UNBOUNDED = -2
INFEASIBLE = -3
INF_OR_UNB = -4
@staticmethod
def get_status_str(id):
if solverStatus.ERROR == id :
str="Error"
elif solverStatus.OPTIMAL == id:
str = "Optimal"
elif solverStatus.SUBOPTIMAL == id:
str = "Sub-Optimal"
elif solverStatus.UNBOUNDED == id or solverStatus.INFEASIBLE == id or solverStatus.INF_OR_UNB == id:
str = "Infeasible or unbounded problem."
else:
str = "Unknown"
return str
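# Illustrative usage sketch (an addition, not part of the original module):
# the classes above are plain namespaces of integer constants, so lookups go
# through the id->name dicts and the static status helper.
if __name__ == "__main__":
    print(optimType.types.get(optimType.REACTION_KO))            # Reaction Knockouts
    print(solverMethod.methods.get(solverMethod.DORMAN_PRINCE))  # DORMAN PRINCE
    print(solverStatus.get_status_str(solverStatus.OPTIMAL))     # Optimal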
| [
[
[
6,
15
],
[
691,
700
]
],
[
[
723,
735
],
[
1305,
1317
]
],
[
[
1342,
1354
],
[
1615,
1627
],
[
1679,
1691
],
[
1748,
1760
],
[
1824,
1836
],
[
1857,
1869
],
[
1890,
1902
]
]
] |
from enum import Enum
from typing import Optional
from uuid import UUID
from pydantic import BaseModel
from app.models import User, Organization
class DataRoomBase(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
class DataRoomCreateRequest(DataRoomBase):
name: str
class DataRoomCreate(DataRoomCreateRequest):
creator: User
organization: Organization
class Config:
arbitrary_types_allowed = True
class DataRoomRole(str, Enum):
OWNER = "OWNER"
ADMIN = "ADMIN"
MEMBER = "MEMBER"
class DataRoomUserRoleRequest(BaseModel):
user_id: UUID
user_role: DataRoomRole
class Config:
use_enum_values = True
class DataRoomTeamRoleRequest(BaseModel):
team_id: UUID
team_role: DataRoomRole
class Config:
use_enum_values = True
class DataRoomUpdate(DataRoomBase):
pass
class DataRoomInDBBase(DataRoomBase):
id: Optional[UUID] = None
class Config:
orm_mode = True
| [
[
[
17,
21
],
[
488,
492
]
],
[
[
41,
49
],
[
190,
198
],
[
228,
236
],
[
932,
940
]
],
[
[
67,
71
],
[
614,
618
],
[
754,
758
],
[
941,
945
]
],
[
[
94,
103
],
[
168,
177
],
[
589,
598
],
[
729,
738
]
],
[
[
128,
132
],
[
368,
372
]
],
[
[
134,
146
],
[
391,
403
]
],
[
[
155,
167
],
[
279,
291
],
[
860,
872
],
[
909,
921
]
],
[
[
257,
278
],
[
331,
352
]
],
[
[
316,
330
]
],
[
[
470,
482
],
[
634,
646
],
[
774,
786
]
],
[
[
565,
588
]
],
[
[
705,
728
]
],
[
[
845,
859
]
],
[
[
892,
908
]
]
] |
import json
from datetime import datetime, timedelta
from bittrex.bittrex import Bittrex
def TradingAlorythm(command, market, amount, coinname, step, stoploss, key, secret):
TestTrading = Bittrex(key, secret)
period = timedelta(seconds=20)
next_tick = datetime.now() + period
seconds = 20
firstCycle = True
if command == "y":
print("buying {0} of {1} coins".format(amount, coinname))
        # uncomment to create a buy order
# TestTrading.buy_limit(market, amount, coinprice)
while command == "y":
        # timer fires every 20 seconds
if next_tick <= datetime.now():
print("Connecting to Bittrex")
seconds += 20
next_tick += period
print("Timer ticked")
print("Updating stock exchange...")
            # Read the current ticker values
            t = TestTrading.get_ticker(market)
            # Request the account balance
            balance = TestTrading.get_balance(coinname)
            # Request the current open orders
            orders = TestTrading.get_open_orders(market)
            a = json.dumps(t)
            # Print the ticker values
            print(t)
            # Print the balance
            print("Balance is {} ".format(balance['result']['Available']))
            # Print the orders
            print(orders)
            # Unpack the ticker values into variables
bid = t['result']['Bid']
ask = t['result']['Ask']
last = t['result']['Last']
if firstCycle:
StartValue = bid
firstCycle = False
Stop_loss = StartValue - 0.00000007
print("*--------------------------")
print("| Start Value | {: .8f} ".format(StartValue))
print("| Stop loss | {: .8f} ".format(Stop_loss))
print("|--------------------------")
print("| Bid | {: .8f} ".format(bid))
print("| Ask | {: .8f} ".format(ask))
print("| Last | {: .8f} ".format(last))
print("*--------------------------")
            # Append Bid to the end of the array
# A.append(float(bid))
if bid >= step + StartValue:
print("MOVE STOP-LOSS")
StartValue = bid
if bid <= stoploss:
print("Sell order sent") | [
[
[
7,
11
],
[
1109,
1113
]
],
[
[
33,
41
],
[
266,
274
],
[
621,
629
]
],
[
[
43,
52
],
[
228,
237
]
],
[
[
81,
88
],
[
194,
201
]
],
[
[
95,
110
]
]
] |
#!/usr/bin/env python3
"""flash.py
Usage:
flash.py [<image>] [options]
flash.py (-h | --help)
Options:
-h --help Show this screen.
--target=<target> Select the target device [default: SAM3x8e].
--erase Erase the target before flashing.
--port=<p> Target device port [default: ttyACM0].
-v --verify Hash the flash and compare to binary.
-r --reset Reset the CPU (after write).
--bootloader=<bl> Specify a custom bootloader binary
[default: sam3x8e/bootloader.bin].
--plane=<pl> Select flash plane 0 or 1 [default: 0].
--boot-rom Boot from ROM.
"""
from sys import exit, stdout
import time
from docopt import docopt
from py.uart import Serial
from py import *
if __name__ == '__main__':
args = docopt(__doc__)
target = args['--target']
image = args['<image>']
port = '/dev/' + args['--port']
plane = int(args['--plane'])
bootloader = args['--bootloader']
verify = args['--verify']
erase = args['--erase']
boot_rom = args['--boot-rom']
reset = args['--reset']
print('Selected port:', port)
print('Selected image:', image)
if target == 'HT32':
from py.ht32.isp import ISP, isp
image = args['<image>']
if image is None:
if reset:
isp(Serial(port)).reset()
else:
print('No image specified, not flashing.')
else:
with open(image, 'rb') as f:
binary = f.read()
isp = ISP(Serial(port))
isp.page_erase(start_addr=0x0, end_addr=0x1000)
isp.flash(0x00, binary)
isp.reset()
elif target == 'SAM3x8e':
from py.sam3x8e.programmer import program
if bootloader is None:
bootloader = 'sam3x8e/bootloader.bin'
program(port, image=image, erase=erase, reset=True,\
verify=verify, bootloader_image=bootloader, plane=plane, boot_rom=boot_rom)
else:
print('Unknown target.')
| [
[
[
667,
671
]
],
[
[
673,
679
]
],
[
[
687,
691
]
],
[
[
712,
718
],
[
803,
809
]
],
[
[
739,
745
],
[
1342,
1348
],
[
1552,
1558
]
],
[
[
762,
763
]
],
[
[
796,
800
],
[
832,
836
],
[
861,
865
],
[
898,
902
],
[
929,
933
],
[
963,
967
],
[
997,
1001
],
[
1026,
1030
],
[
1057,
1061
],
[
1088,
1092
],
[
1258,
1262
]
],
[
[
823,
829
],
[
1183,
1189
],
[
1696,
1702
]
],
[
[
853,
858
],
[
1168,
1173
],
[
1876,
1881
]
],
[
[
881,
885
],
[
1133,
1137
],
[
1349,
1353
],
[
1559,
1563
],
[
1864,
1868
]
],
[
[
917,
922
],
[
1975,
1980
]
],
[
[
950,
960
],
[
1778,
1788
],
[
1957,
1967
]
],
[
[
988,
994
],
[
1932,
1938
]
],
[
[
1018,
1023
],
[
1889,
1894
]
],
[
[
1046,
1054
],
[
1991,
1999
]
],
[
[
1080,
1085
],
[
1315,
1320
]
],
[
[
1233,
1236
],
[
1548,
1551
]
],
[
[
1238,
1241
],
[
1338,
1341
]
],
[
[
1250,
1255
],
[
1285,
1290
],
[
1477,
1482
]
],
[
[
1493,
1494
],
[
1521,
1522
]
],
[
[
1512,
1518
],
[
1654,
1660
]
],
[
[
1542,
1545
],
[
1578,
1581
],
[
1638,
1641
],
[
1674,
1677
]
],
[
[
1759,
1766
],
[
1856,
1863
]
],
[
[
1810,
1820
],
[
1957,
1967
]
]
] |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2022-04-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class PrivateLinkResourcesOperations(object):
"""PrivateLinkResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2022_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.PrivateLinkResourcesListResult":
"""Gets a list of private link resources in the specified managed cluster.
To learn more about private clusters, see:
https://docs.microsoft.com/azure/aks/private-clusters.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourcesListResult, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_04_01.models.PrivateLinkResourcesListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourcesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-04-01") # type: str
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourcesListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources"} # type: ignore
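# Illustrative usage sketch (an addition, not part of the generated file).
# Callers normally reach this operation group through the versioned management
# client rather than instantiating PrivateLinkResourcesOperations directly.
# The resource names below are placeholders.
def _example_list_private_link_resources():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.containerservice import ContainerServiceClient
    client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
    result = client.private_link_resources.list("<resource-group>", "<cluster-name>")
    for resource in result.value:
        print(resource.name)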
| [
[
[
519,
522
],
[
1179,
1182
],
[
1186,
1189
],
[
1382,
1385
],
[
3764,
3767
]
],
[
[
524,
532
],
[
1111,
1119
]
],
[
[
534,
538
],
[
1169,
1173
]
],
[
[
540,
548
],
[
1102,
1110
]
],
[
[
550,
557
],
[
1079,
1086
]
],
[
[
578,
588
],
[
1207,
1217
]
],
[
[
624,
649
],
[
4738,
4763
]
],
[
[
651,
668
],
[
5743,
5760
]
],
[
[
670,
689
],
[
4798,
4817
]
],
[
[
691,
712
],
[
4770,
4791
]
],
[
[
714,
723
],
[
5641,
5650
]
],
[
[
756,
772
],
[
1121,
1137
]
],
[
[
815,
827
],
[
1151,
1163
]
],
[
[
856,
867
],
[
1138,
1149
],
[
1391,
1402
],
[
2646,
2657
]
],
[
[
909,
926
],
[
3638,
3655
]
],
[
[
966,
980
],
[
5793,
5807
]
],
[
[
997,
1014
],
[
3419,
3426
]
],
[
[
1037,
1053
],
[
5275,
5291
]
],
[
[
1055,
1074
],
[
2211,
2230
]
],
[
[
1075,
1076
],
[
1166,
1167
]
],
[
[
1092,
1099
]
],
[
[
1193,
1204
],
[
1220,
1231
],
[
1814,
1825
],
[
1917,
1928
],
[
2038,
2049
],
[
2402,
2413
],
[
2590,
2601
]
],
[
[
1268,
1286
],
[
4986,
5004
]
],
[
[
2799,
2829
]
]
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-09-30 18:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menu', '0005_auto_20170930_1059'),
]
operations = [
migrations.AlterField(
model_name='item',
name='description',
field=models.CharField(max_length=255),
),
]
| [
[
[
95,
111
]
],
[
[
135,
145
],
[
172,
182
],
[
296,
306
]
],
[
[
147,
153
],
[
400,
406
]
],
[
[
162,
171
]
]
] |
from importlib import import_module
from django.db.models.signals import post_migrate
from django.apps import AppConfig
def default_data_setup(sender, **kwargs):
from django.contrib.auth.models import User
try:
anon = User.objects.get(username='ANONYMOUS_USER')
except User.DoesNotExist:
print('Adding ANONYMOUS_USER')
anon = User.objects.create_user('ANONYMOUS_USER', 'anonymous_user@example.com')
        # Make the user unusable
anon.set_unusable_password()
anon.is_active = False
anon.save()
class RadioConfig(AppConfig):
name = 'radio'
def ready(self):
post_migrate.connect(default_data_setup, sender=self)
| [
[
[
22,
35
]
],
[
[
73,
85
],
[
641,
653
]
],
[
[
111,
120
],
[
579,
588
]
],
[
[
126,
144
],
[
662,
680
]
],
[
[
567,
578
]
]
] |
from nltk.util import ngrams
from nltk.corpus import stopwords
from collections import Counter
from .common import get_pp_pipeline
def or_list(booleans):
return True in booleans
def get_ngrams(D):
'''
Returns all ngrams (aka a token containing a dollar sign ($)) from a set of topics or documents
    :param D: the set of topics or documents (each a list of tokens)
:return:
'''
ngrams = set()
for d in D:
for w in d:
if '$' in w:
ngrams.add(w)
return list(ngrams)
def get_frequent_ngrams(text, n, stopword_list, threshold):
bigrams = ngrams(text, n)
bigram_freq = Counter(bigrams)
frequent_bigrams = []
for bigram, freq in bigram_freq.most_common():
if not (or_list([i in stopword_list for i in bigram])):
if freq > threshold:
frequent_bigrams.append('{}${}'.format(bigram[0], bigram[1]))
else:
break
return frequent_bigrams
def ngrammize_text(text, ngrams):
bigrammized_text = []
i = 0
while i < len(text):
term = text[i]
        if i == len(text)-1:
            bigrammized_text.append(term)
        else:
            next_term = text[i+1]
            test_bigram = '{}${}'.format(term, next_term)
            if test_bigram in ngrams:
                bigrammized_text.append(test_bigram)
                # skip the next term too, since it was absorbed into the bigram
                i += 1
            else:
                bigrammized_text.append(term)
        i += 1
return bigrammized_text
def get_dataset_ngrams(docs, min_freq=1000, sw=None, extra_bigrams=None, extra_ngrams=None):
if not sw:
sw = stopwords.words('english')
sw_pp = get_pp_pipeline(remove_stopwords=False)
sw = sw_pp.clean_document(sw)
full_text = []
for doc in docs:
full_text.extend(doc)
frequent_bigrams = get_frequent_ngrams(full_text, 2, sw, min_freq)
if extra_bigrams:
frequent_bigrams.extend(extra_bigrams)
bigrammized_text = ngrammize_text(full_text, frequent_bigrams)
frequent_ngrams = get_frequent_ngrams(bigrammized_text, 2, sw, min_freq)
if extra_ngrams:
frequent_ngrams.extend(extra_ngrams)
return frequent_bigrams, frequent_ngrams
def insert_ngrams_flat_from_lists(docs, frequent_bigrams, frequent_ngrams):
for i in range(0, len(docs)):
doc = docs[i]
doc = ngrammize_text(doc, frequent_bigrams)
doc = ngrammize_text(doc, frequent_ngrams)
docs[i] = doc
return docs
def insert_ngrams_flat(docs, min_freq=1000, sw=None, extra_bigrams=None, extra_ngrams=None):
fb, fn = get_dataset_ngrams(docs, min_freq, sw, extra_bigrams, extra_ngrams)
return insert_ngrams_flat_from_lists(docs, fb, fn)
def insert_ngrams_from_lists(date_doc_tuples, frequent_bigrams, frequent_ngrams):
for i in range(0, len(date_doc_tuples)):
date, doc = date_doc_tuples[i]
doc = ngrammize_text(doc, frequent_bigrams)
doc = ngrammize_text(doc, frequent_ngrams)
date_doc_tuples[i] = (date, doc)
return date_doc_tuples
def insert_ngrams(date_docs, min_freq=1000, sw=None, extra_bigrams=None, extra_ngrams=None):
fb, fn = get_dataset_ngrams([x[1] for x in date_docs], min_freq, sw, extra_bigrams, extra_ngrams)
return insert_ngrams_from_lists(date_docs, fb, fn)
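# Illustrative usage sketch (an addition, not part of the original module).
# Passing `sw` explicitly avoids the NLTK stopword download; the toy corpus is
# made up. With min_freq=1, any bigram seen more than once ("new york", then
# "new$york times") is merged into a single token.
def _example_insert_ngrams_flat():
    docs = [
        ["new", "york", "times", "reports", "on", "new", "york", "weather"],
        ["she", "reads", "the", "new", "york", "times", "every", "day"],
    ]
    merged = insert_ngrams_flat(docs, min_freq=1, sw=["the", "on"])
    # merged[0] == ['new$york$times', 'reports', 'on', 'new$york', 'weather']
    return merged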
| [
[
[
22,
28
],
[
562,
568
]
],
[
[
53,
62
],
[
1574,
1583
]
],
[
[
87,
94
],
[
596,
603
]
],
[
[
115,
130
],
[
1617,
1632
]
],
[
[
136,
143
],
[
706,
713
]
],
[
[
189,
199
]
],
[
[
492,
511
],
[
1788,
1807
],
[
1994,
2013
]
],
[
[
939,
953
],
[
1928,
1942
],
[
2308,
2322
],
[
2360,
2374
],
[
2848,
2862
],
[
2900,
2914
]
],
[
[
1457,
1475
],
[
2543,
2561
],
[
3113,
3131
]
],
[
[
2166,
2195
],
[
2622,
2651
]
],
[
[
2441,
2459
]
],
[
[
2672,
2696
],
[
3213,
3237
]
],
[
[
3011,
3024
]
]
] |
import collections
import copy
import logging
import time
from abc import abstractmethod
from ...scheduler import HyperbandScheduler, RLScheduler, FIFOScheduler
from ...scheduler.seq_scheduler import LocalSequentialScheduler
from ...utils import in_ipynb, try_import_mxnet
from ...utils.utils import setup_compute
__all__ = [
'BaseTask',
'compile_scheduler_options',
'compile_scheduler_options_v2',
'create_scheduler']
Results = collections.namedtuple('Results', 'model reward config time metadata')
schedulers = {
'local': LocalSequentialScheduler,
'fifo': FIFOScheduler,
'rl': RLScheduler,
'hyperband_stopping': HyperbandScheduler,
'hyperband_promotion': HyperbandScheduler,
}
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def create_scheduler(train_fn, scheduler, scheduler_options):
if isinstance(scheduler, str):
scheduler_cls = schedulers[scheduler.lower()]
else:
assert callable(scheduler)
scheduler_cls = scheduler
scheduler_options = copy.copy(scheduler_options)
return scheduler_cls(train_fn, **scheduler_options)
class BaseTask(object):
"""BaseTask for AutoGluon applications
"""
@property
@staticmethod
def Dataset():
try_import_mxnet()
from autogluon.mxnet.utils.dataset import BaseDataset
return BaseDataset
@classmethod
def run_fit(cls, train_fn, search_strategy, scheduler_options,
plot_results=False):
start_time = time.time()
# create scheduler and schedule tasks
scheduler = create_scheduler(train_fn, search_strategy, scheduler_options)
scheduler.run()
scheduler.join_jobs()
# gather the best configuration
best_reward = scheduler.get_best_reward()
best_config = scheduler.get_best_config()
args = train_fn.args
args.final_fit = True
if hasattr(args, 'epochs') and hasattr(args, 'final_fit_epochs'):
args.epochs = args.final_fit_epochs
train_fn.args.update({'final_fit':True})
train_fn.kwvars.update({'final_fit':True})
scheduler_final = create_scheduler(train_fn, search_strategy, scheduler_options)
results = scheduler_final.run_with_config(best_config)
total_time = time.time() - start_time
if plot_results or in_ipynb():
plot_training_curves = scheduler_options['checkpoint'].replace('exp1.ag', 'plot_training_curves.png')
scheduler.get_training_curves(filename=plot_training_curves, plot=True, use_legend=False)
record_args = copy.deepcopy(args)
if results is None:
logger.warning('No valid results obtained with best config, the result may not be useful...')
results = {}
results.update(best_reward=best_reward,
best_config=best_config,
total_time=total_time,
metadata=scheduler.metadata,
training_history=scheduler.training_history,
config_history=scheduler.config_history,
reward_attr=scheduler._reward_attr,
args=record_args)
return results
@classmethod
@abstractmethod
def fit(cls, *args, **kwargs):
pass
# These search_strategies use HyperbandScheduler, along with certain
# searchers.
searcher_for_hyperband_strategy = {
'hyperband': 'random',
'bayesopt_hyperband': 'bayesopt'}
def compile_scheduler_options(
scheduler_options, search_strategy, search_options, nthreads_per_trial,
ngpus_per_trial, checkpoint, num_trials, time_out, resume, visualizer,
time_attr, reward_attr, dist_ip_addrs, epochs=None):
"""
Updates a copy of scheduler_options (scheduler-specific options, can be
empty) with general options. The result can be passed to __init__ of the
scheduler.
Special role of epochs for HyperbandScheduler: If the search_strategy
involves HyperbandScheduler and epochs is given, then this value is
copied to scheduler_options['max_t']. Pass epochs for applications
where the time_attr is epoch, and epochs is the maximum number of
epochs.
:param scheduler_options:
:param search_strategy:
:param search_options:
:param nthreads_per_trial:
:param ngpus_per_trial:
:param checkpoint:
:param num_trials:
:param time_out:
:param resume:
:param visualizer:
:param time_attr:
:param reward_attr:
:param dist_ip_addrs:
:param kwargs:
:param epochs: See above. Optional
:return: Copy of scheduler_options with updates
"""
if scheduler_options is None:
scheduler_options = dict()
else:
assert isinstance(scheduler_options, dict)
assert isinstance(search_strategy, str)
if search_options is None:
search_options = dict()
if visualizer is None:
visualizer = 'none'
if time_attr is None:
time_attr = 'epoch'
if reward_attr is None:
reward_attr = 'accuracy'
scheduler_options = copy.copy(scheduler_options)
scheduler_options.update({
'resource': {
'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
'searcher': search_strategy,
'search_options': search_options,
'checkpoint': checkpoint,
'resume': resume,
'num_trials': num_trials,
'time_out': time_out,
'reward_attr': reward_attr,
'time_attr': time_attr,
'visualizer': visualizer,
'dist_ip_addrs': dist_ip_addrs})
searcher = searcher_for_hyperband_strategy.get(search_strategy)
if searcher is not None:
scheduler_options['searcher'] = searcher
if epochs is not None:
scheduler_options['max_t'] = epochs
return scheduler_options
# TODO: Migrate TextPredictor to use this version, delete old version
def compile_scheduler_options_v2(
scheduler_options, nthreads_per_trial,
ngpus_per_trial, num_trials, time_out, scheduler=None, search_strategy=None, search_options=None, checkpoint=None, resume=False, visualizer=None,
time_attr=None, reward_attr=None, dist_ip_addrs=None, epochs=None):
"""
Updates a copy of scheduler_options (scheduler-specific options, can be
empty) with general options. The result can be passed to __init__ of the
scheduler.
Special role of epochs for HyperbandScheduler: If the search_strategy
involves HyperbandScheduler and epochs is given, then this value is
copied to scheduler_options['max_t']. Pass epochs for applications
where the time_attr is epoch, and epochs is the maximum number of
epochs.
:param scheduler_options:
:param scheduler:
:param search_strategy:
:param search_options:
:param nthreads_per_trial:
:param ngpus_per_trial:
:param checkpoint:
:param num_trials:
:param time_out:
:param resume:
:param visualizer:
:param time_attr:
:param reward_attr:
:param dist_ip_addrs:
:param kwargs:
:param epochs: See above. Optional
:return: Copy of scheduler_options with updates
"""
if scheduler_options is None:
scheduler_options = dict()
else:
assert isinstance(scheduler_options, dict)
scheduler_options = copy.copy(scheduler_options)
if dist_ip_addrs is None:
dist_ip_addrs = []
if search_strategy is None:
search_strategy = 'random'
if scheduler is None:
scheduler = 'local'
assert isinstance(search_strategy, str)
if search_options is None:
search_options = dict()
if visualizer is None:
visualizer = 'none'
if time_attr is None:
time_attr = 'epoch'
if reward_attr is None:
reward_attr = 'validation_performance'
scheduler_params = {
'resource': {
'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
'scheduler': scheduler,
'searcher': search_strategy,
'search_options': search_options,
'checkpoint': checkpoint,
'resume': resume,
'num_trials': num_trials,
'time_out': time_out,
'reward_attr': reward_attr,
'time_attr': time_attr,
'visualizer': visualizer,
'dist_ip_addrs': dist_ip_addrs,
}
resource = None
if 'resource' in scheduler_options:
scheduler_params['resource'].update(scheduler_options['resource'])
resource = scheduler_params['resource'].copy()
scheduler_params.update(scheduler_options)
if resource:
scheduler_params['resource'] = resource
scheduler_params['resource']['num_cpus'], scheduler_params['resource']['num_gpus'] = setup_compute(
nthreads_per_trial=scheduler_params['resource']['num_cpus'],
ngpus_per_trial=scheduler_params['resource']['num_gpus'],
) # TODO: use 'auto' downstream
searcher = searcher_for_hyperband_strategy.get(scheduler_params['searcher'])
if searcher is not None:
scheduler_params['searcher'] = searcher
if epochs is not None:
scheduler_params['max_t'] = epochs
required_options = [
'resource',
'scheduler',
'searcher',
'search_options',
'checkpoint',
'resume',
'num_trials',
'time_out',
'reward_attr',
'time_attr',
'visualizer',
'dist_ip_addrs',
]
missing_options = []
for option in required_options:
if option not in scheduler_params:
missing_options.append(option)
if missing_options:
raise AssertionError(f'Missing required keys in scheduler_options: {missing_options}')
return scheduler_params
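# Illustrative usage sketch (an addition, not part of the original module).
# The values are arbitrary; the helper fills in the defaults ('local'
# scheduler, 'random' searcher) and normalizes the resource dict via
# setup_compute.
def _example_compile_scheduler_options_v2():
    params = compile_scheduler_options_v2(
        scheduler_options=None,
        nthreads_per_trial=4,
        ngpus_per_trial=0,
        num_trials=10,
        time_out=3600,
    )
    # e.g. params['scheduler'] == 'local', params['searcher'] == 'random'
    return params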
| [
[
[
7,
18
],
[
448,
459
]
],
[
[
26,
30
],
[
1049,
1053
],
[
2612,
2616
],
[
5116,
5120
],
[
7352,
7356
]
],
[
[
38,
45
],
[
729,
736
],
[
773,
780
]
],
[
[
53,
57
],
[
1521,
1525
],
[
2310,
2314
]
],
[
[
74,
88
],
[
3263,
3277
]
],
[
[
115,
133
],
[
650,
668
],
[
697,
715
]
],
[
[
135,
146
],
[
611,
622
]
],
[
[
148,
161
],
[
586,
599
]
],
[
[
201,
225
],
[
548,
572
]
],
[
[
247,
255
],
[
2362,
2370
]
],
[
[
257,
273
],
[
1270,
1286
]
],
[
[
301,
314
],
[
8746,
8759
]
],
[
[
316,
323
]
],
[
[
438,
445
]
],
[
[
520,
530
],
[
912,
922
]
],
[
[
720,
726
],
[
757,
763
],
[
2672,
2678
]
],
[
[
795,
811
],
[
1599,
1615
],
[
2163,
2179
]
],
[
[
1142,
1150
]
],
[
[
3410,
3441
],
[
5633,
5664
],
[
8949,
8980
]
],
[
[
3517,
3542
]
],
[
[
5948,
5976
]
]
] |
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
if is_py2:
from urlparse import urlparse
from urllib import quote
from urlparse import urljoin
import pytz as timezone
from email import message_from_string as message_from_bytes_or_string
from __builtin__ import xrange as range_or_xrange
elif is_py3:
from urllib.parse import urlparse
from urllib.parse import quote
from urllib.parse import urljoin
from datetime import timezone
from email import message_from_bytes as message_from_bytes_or_string
from builtins import range as range_or_xrange
def message_as_bytes_or_string(message):
if is_py2:
return message.as_string()
else:
return message.as_bytes()
def is_string_type(value):
if is_py2:
return isinstance(value, basestring)
else:
return type(value) is str
| [
[
[
7,
10
],
[
66,
69
]
],
[
[
59,
63
],
[
109,
113
],
[
149,
153
]
],
[
[
99,
105
],
[
167,
173
],
[
757,
763
],
[
880,
886
]
],
[
[
139,
145
],
[
432,
438
]
],
[
[
200,
208
]
],
[
[
232,
237
]
],
[
[
263,
270
]
],
[
[
282,
298
]
],
[
[
321,
372
]
],
[
[
401,
426
]
],
[
[
469,
477
]
],
[
[
507,
512
]
],
[
[
542,
549
]
],
[
[
575,
583
]
],
[
[
606,
656
]
],
[
[
682,
706
]
],
[
[
713,
739
]
],
[
[
850,
864
]
]
] |
"""motiv synchronization primitives
Module:
Using a uniform interface to define synchronization
primitives helps us use multiple execution frameworks
without changing any of the code written.
    For example, multiprocessing vs. threading.
"""
import abc
class SystemEvent(abc.ABC):
"""Event abstract class"""
@abc.abstractmethod
def is_set(self):
"""checks if the event is set."""
@abc.abstractmethod
def set(self):
"""sets the event"""
@abc.abstractmethod
def clear(self):
"""clears the event"""
@abc.abstractmethod
def wait(self, *args, **kwargs):
"""waits till event is set"""
__all__ = [
'SystemEvent',
]
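# Illustrative concrete implementation (an addition, not part of the original
# module): a SystemEvent backed by the standard library's threading.Event,
# showing how one execution framework plugs into the interface above. The
# class name ThreadingEvent is chosen here for illustration only.
import threading

class ThreadingEvent(SystemEvent):
    """SystemEvent implementation backed by threading.Event."""
    def __init__(self):
        self._event = threading.Event()
    def is_set(self):
        return self._event.is_set()
    def set(self):
        self._event.set()
    def clear(self):
        self._event.clear()
    def wait(self, *args, **kwargs):
        return self._event.wait(*args, **kwargs)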
| [
[
[
264,
267
],
[
288,
291
],
[
335,
338
],
[
424,
427
],
[
497,
500
],
[
574,
577
]
],
[
[
276,
287
]
],
[
[
670,
677
]
]
] |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin,BaseUserManager
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager for user profiles """
def create_user(self,email,name,password=None):
""" Create a New user profile"""
if not email:
raise ValueError("User must gave an email address")
email=self.normalize_email(email)
user=self.model(email=email,name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,name,password):
"""Create and save a new superuser with given details """
        user=self.create_user(email,name,password)
user.is_superuser=True
user.is_staff=True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
""" Database model for users in the system """
email=models.EmailField(max_length=100,unique=True)
name=models.CharField(max_length=255)
is_active=models.BooleanField(default=True)
is_staff=models.BooleanField(default=False)
objects=UserProfileManager()
USERNAME_FIELD='email'
REQUIRED_FIELDS=['name']
def get_full_name(self):
""" Retrieve full name of user """
return self.name
def get_short_name(self):
""" Retrieve short name of User """
return self.name
def __str__(self):
"""Return String representation """
return self.email
| [
[
[
22,
28
],
[
1029,
1035
],
[
1084,
1090
],
[
1131,
1137
],
[
1178,
1184
]
],
[
[
68,
84
],
[
932,
948
]
],
[
[
124,
140
],
[
949,
965
]
],
[
[
141,
156
],
[
210,
225
]
],
[
[
191,
209
],
[
1226,
1244
]
],
[
[
920,
931
]
]
] |
"""Rule generation utilities."""
load("@org_tensorflow//tensorflow:tensorflow.bzl", "if_not_windows", "tf_binary_additional_srcs", "tf_cc_binary", "tf_copts")
load("//tensorflow_decision_forests/tensorflow:utils.bzl", "rpath_linkopts_to_tensorflow")
def py_wrap_yggdrasil_learners(
name = None,
learner_deps = []):
"""Creates Keras wrappers around Yggdrasil Decision Forest (YDF) learners.
Creates a py_library called "{name}" and containing the file "{name}.py".
This library introduces a TensorFlow Decision Forests (TFDF) Keras class
wrapping for each YDF learner defined in "learner_deps". The constructor of
  these classes contains an argument for the learner's generic hyper-parameters.
  For example, if "learner_deps" contains a c++ dependency that registers a
learner with a key equal to "RANDOM_FOREST", the wrapper will create a
python class called "RandomForestModel" deriving the base TFDF model class.
Args:
name: Name of the rule.
learner_deps: List of dependencies linking Yggdrasil Decision Forest
learners.
"""
# Absolute path to the wrapper generator directory.
wrapper_package = "//tensorflow_decision_forests/keras/wrapper"
# Filename of the wrapper generator source code in the user package.
local_cc_main = name + "_wrapper_main.cc"
# Target name of the wrapper generator binary.
wrapper_name = name + "_wrapper_main"
# Target name of the command running the wrapper generator.
run_wrapper_name = name + "_run_wrapper"
# Copy the wrapper main source code to the user package.
native.genrule(
name = name + "_copy_cc_main",
outs = [local_cc_main],
srcs = [wrapper_package + ":wrapper_main.cc"],
cmd = "cp $< $@",
)
# Compiles the wrapper binary.
tf_cc_binary(
name = wrapper_name,
copts = tf_copts(),
linkopts = if_not_windows(["-lm", "-Wl,-ldl"]) + rpath_linkopts_to_tensorflow(wrapper_name),
srcs = [":" + local_cc_main],
deps = [
wrapper_package + ":wrapper",
] + learner_deps,
linkstatic = 1,
)
# Runs the wrapper binary and generate the wrapper .py source code.
native.genrule(
name = run_wrapper_name,
srcs = [],
outs = [name + ".py"],
cmd = "$(location " + wrapper_name + ") > \"$@\"",
tools = [":" + wrapper_name] + tf_binary_additional_srcs(),
)
# Python library around the generated .py source code.
native.py_library(
name = name,
srcs = [name + ".py"],
srcs_version = "PY3",
deps = [
"//tensorflow_decision_forests/keras:core",
"@org_tensorflow//tensorflow/python",
"@ydf//yggdrasil_decision_forests/model:abstract_model_py_proto",
"@ydf//yggdrasil_decision_forests/learner:abstract_learner_py_proto",
],
data = [":" + run_wrapper_name, ":" + wrapper_name],
)
| [
[
[
256,
282
]
]
] |
"""
Input: tsv file in the form
Input Video filename | topic | subtopic | title greek | title english | start time | end time | delete segments
input.mp4 | 1 | 1 | έξοδος | output | 00:10:05 | 00:30:10 | 00:11:15-00:12:30,00:20:35-00:22:10
"""
import os
import subprocess
import sys
import yaml
def run_cmd(command: str):
"""run_cmd Run given shell command
Args:
command (str): Shell command to run
Returns:
(int, str): Status code, stdout of shell command
Examples:
>>> run_cmd("ls /")
(0, 'bin\nboot\ndev\netc\nhome\ninit\nlib\nlib32\nlib64\nlibx32\nlost+found\nmedia\nmnt\nopt\nproc\nroot\nrun\nsbin\nsnap\nsrv\nsys\ntmp\nusr\nvar\n')
"""
command = f'{os.getenv("SHELL")} -c "{command}"'
pipe = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
stdout = ""
if pipe.stdout is not None:
stdout = "".join(
[line.decode("utf-8") for line in iter(pipe.stdout.readline, b"")]
)
pipe.stdout.close()
returncode = pipe.wait()
print(stdout)
return returncode, stdout
def out_video(segment, greek=True):
title_idx = 3 if greek else 4
title, topic, subtopic = segment[title_idx], segment[1], segment[2]
name = f"{title}_{topic}-{subtopic}.mp4"
return name
def input_video(segment):
return segment[0]
def manage_timestamps(segment):
try:
st, et = segment[5], segment[6]
except:
st = segment[5]
return [st]
try:
delete_timestamps = segment[7]
except:
return [st, et]
if not delete_timestamps:
return [st, et]
else:
return (
[st]
+ [
t
for s in delete_timestamps.split(",")
for t in (s.split("-")[0], s.split("-")[1])
]
+ [et]
)
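# Illustrative example (an addition): for the sample row in the module
# docstring, the kept/deleted boundaries are flattened into one list:
#   manage_timestamps(["input.mp4", "1", "1", "έξοδος", "output",
#                      "00:10:05", "00:30:10",
#                      "00:11:15-00:12:30,00:20:35-00:22:10"])
#   -> ['00:10:05', '00:11:15', '00:12:30', '00:20:35', '00:22:10', '00:30:10']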
def to_cut_fmt(timestamp):
out = ""
labels = ["h", "m", "s"]
lb_idx = 0
for c in timestamp:
if c == ":":
out += labels[lb_idx]
lb_idx += 1
else:
out += c
return out
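# Illustrative example (an addition): to_cut_fmt relabels the colon separators
# with h/m markers, e.g. to_cut_fmt("00:11:15") -> '00h11m15'.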
def to_cut_yaml(inmp4, outmp4, ymlname, timestamps):
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
timestamps = [to_cut_fmt(t) for t in timestamps]
timeframe = []
if len(timestamps) == 1:
timeframe = [{"from": "start", "to": timestamps[0]}]
else:
for s, e in pairwise(["start"] + timestamps + ["end"]):
timeframe += [{"from": s, "to": e}]
out = {
"input": inmp4,
"output": outmp4,
"cut_method": "delete",
"timeframe": timeframe,
}
with open(ymlname, "w") as fd:
yaml.dump(out, fd, default_flow_style=False, sort_keys=False)
def format_timestamp_args(timestamps):
if len(timestamps) == 1:
return [f"-ss {timestamps[0]} "]
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
cmds = [f"-ss {s} -to {e}" for s, e in pairwise(timestamps)]
return cmds
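# Illustrative example (an addition): consecutive timestamp pairs become
# ffmpeg -ss/-to argument strings, e.g.
#   format_timestamp_args(['00:10:05', '00:11:15', '00:12:30', '00:30:10'])
#   -> ['-ss 00:10:05 -to 00:11:15', '-ss 00:12:30 -to 00:30:10']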
def ffmpeg(inp, out, timestamps_args):
if len(timestamps_args) == 1:
run_cmd(f"ffmpeg -y -i '{inp}' " + timestamps_args[0] + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{out}'")
return
mp4s = []
for i, arg in enumerate(timestamps_args):
mp4s.append(f"{i}.mp4")
cmd = f"ffmpeg -i '{inp}' " + arg + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{i}.mp4'"
print(cmd)
run_cmd(cmd)
tmp = ".tmp_files.txt"
with open(tmp, "w") as fd:
for f in mp4s:
fd.write(f"file '{f}'\n")
run_cmd(f"ffmpeg -y -f concat -i .tmp_files.txt '{out}'")
run_cmd(f"rm {tmp} " + " ".join(mp4s))
def read_split_tsv(timestamp_file):
with open(timestamp_file) as f:
segments = [ln.strip().split("\t") for ln in f]
return segments
def main():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for segment in segments:
inmp4 = input_video(segment)
outmp4 = "out/" + out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
timestamp_args = format_timestamp_args(timestamps)
ffmpeg(inmp4, outmp4, timestamp_args)
def main1():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for i, segment in enumerate(segments):
inmp4 = input_video(segment)
outmp4 = out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
to_cut_yaml(inmp4, outmp4, f"{i}.yml", timestamps)
if __name__ == "__main__":
main()
| [
[
[
289,
291
],
[
752,
754
]
],
[
[
299,
309
],
[
799,
809
],
[
853,
863
],
[
877,
887
]
],
[
[
317,
320
],
[
4096,
4099
],
[
4467,
4470
]
],
[
[
329,
333
],
[
2837,
2841
]
],
[
[
340,
347
],
[
3313,
3320
],
[
3671,
3678
],
[
3809,
3816
],
[
3871,
3878
]
],
[
[
1178,
1187
],
[
4247,
4256
],
[
4623,
4632
]
],
[
[
1384,
1395
],
[
4200,
4211
],
[
4585,
4596
]
],
[
[
1434,
1451
],
[
4299,
4316
],
[
4675,
4692
]
],
[
[
1950,
1960
],
[
2395,
2405
]
],
[
[
2191,
2202
],
[
4710,
4721
]
],
[
[
2905,
2926
],
[
4351,
4372
]
],
[
[
3236,
3242
],
[
4393,
4399
]
],
[
[
3916,
3930
],
[
4123,
4137
],
[
4494,
4508
]
],
[
[
4067,
4071
],
[
4794,
4798
]
],
[
[
4437,
4442
]
]
] |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
__version__ = '3.2.2'
| [
[
[
568,
579
]
]
] |
#!/usr/bin/env python
from threading import Timer,Thread
import RPIO
from RPIO import PWM
import paramiko
import json
import sys
from time import time, sleep
from relaxxapi.relaxxapi import relaxx
r = None
sftp_base_path = "/home/shack/music"
button = 4
loud1 = 21
loud2 = 22
state = 0
def init_state():
    global state
    state = 0
RPIO.setup(loud1, RPIO.OUT)
RPIO.setup(loud2, RPIO.OUT)
t1_2 = 1
timer=None
t2_4 = 1
t4_5 = 3
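# State machine inferred from the transition functions below (added comment, not part
# of the original script):
#   0: idle; button pressed -> 1
#   1: button held for t1_2 s -> 2 (sirene 1 on)
#   2: still held for t2_4 s -> 4 (sirene 2 on)
#   4: still held for t4_5 s -> 5, then the current song is deleted and state resets to 0
#   releasing the button in state 1, 2 or 4 cancels the timers, stops the sirens and
#   skips to the next song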
def time3_trans():
global state
    if state == 4:
state = 5
stop_sirene1()
stop_sirene2()
disable_all_timers()
delete_current_music()
state = 0
else:
print("State is not 4, will do nothing")
def time2_trans():
global state
global timer
    if state == 2:
state = 4
start_sirene2()
        timer = Timer(t4_5, time3_trans)
        timer.start()
else:
print("State is not 2, will do nothing")
def time1_trans():
global state
global timer
    if state == 1:
state = 2
start_sirene1()
        timer = Timer(t2_4, time2_trans)
        timer.start()
else:
print("State is not 1, will do nothing")
def btn_trans(a,edge):
global state
global timer
print("Button: %s , edge: %s, state: %d" % (str(a), str(edge),state))
    if edge and state == 0:
state = 1
        timer = Timer(t1_2, time1_trans)
        timer.start()
# stopped pressing the button but the timeout is not over
    elif not edge and state in (1, 2, 4):
state = 0
disable_all_timers()
stop_sirene1()
stop_sirene2()
try:
play_next()
except:
tell_gobbelz("Cannot play next song. Sorry:(")
tell_gobbelz("Bailing out")
sys.exit(1)
    elif not edge and state == 5:
print("button released while removing music, all fine")
else:
print("this should never happen")
def disable_all_timers():
print("disabling all the timers")
global timer
try:
timer.cancel()
print("timer canceled")
except: pass
def start_sirene1():
print("start Sirene 1")
RPIO.output(loud1, True)
def start_sirene2():
print("starting Sirene 2")
RPIO.output(loud2, True)
def stop_sirene1():
print("stopping Sirene 1")
RPIO.output(loud1, False)
def stop_sirene2():
print("stopping Sirene 2")
RPIO.output(loud2, False)
def play_radio():
#TODO play radio
if r.get_current().get("file", "") == "http://ice.somafm.com/groovesalad":
print("will not skip own sender")
return
print("playing radio")
tell_gobbelz("Starting Radio Stream")
r.add_song("http://ice.somafm.com/groovesalad")
r.play_last()
def play_next():
print ("playing next song")
try:
#sanity
if is_last_song():
raise Exception("Last song in playlist")
r.next_song()
except:
print("no next song, starting radio")
play_radio()
def is_last_song():
return r.get_current()["Pos"] == r.get_last()["Pos"]
def delete_current_music():
print("delete current music")
current = r.get_current()
if not current:
print("Nothing is running, bailing out")
return
delete_remote_file(current)
play_next()
def delete_remote_file(current):
try:
sftp_delete_remote_file(current["file"])
say_song_killed(current.get("Title", "Unbekannter Title"),
current.get("Artist", "Unbekannter Kuenstler"))
except Exception as e:
print("Cannot delete remote file! ( %s ) " %str(e))
def sftp_delete_remote_file(f):
host = "mpd.shack"
port = 22
transport = paramiko.Transport((host, port))
username = 'shack'
passwd = 'shackit'
transport.connect(username=username, password=passwd)
sftp = paramiko.SFTPClient.from_transport(transport)
#print(sftp.stat('%s/%s'%(base_path,f)))
print(sftp.unlink('%s/%s' % (sftp_base_path, f)))
sftp.close()
transport.close()
def say_song_killed(name, author):
tell_gobbelz('%s von %s wurde vernichtet!' % (name, author) )
def tell_gobbelz(text):
import requests
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = {'text': text}
# curl -i -H "content-type: application/json"
# -X POST -d "{\"text\" : \"Hallo shackspace\"}" kiosk.shack:8080/say/
requests.post("http://kiosk.shack:8080/say/",
data=json.dumps(data), headers=headers)
if __name__ == "__main__":
from time import sleep
init_state()
print("initializing relaxxapi")
try:
r = relaxx(relaxxurl="http://lounge.mpd.shack/")
except:
tell_gobbelz("EM PE DE unreachable!")
tell_gobbelz("Bailing out")
sys.exit(1)
print("adding interrupt")
RPIO.add_interrupt_callback(button,callback=btn_trans,pull_up_down=RPIO.PUD_DOWN) #,debounce_timeout_ms=1
print ("Start Interrupt handler")
RPIO.wait_for_interrupts()
#Thread(target=start_hal,args=(hal_speed,)).start()
| [
[
[
44,
49
],
[
821,
826
],
[
1041,
1046
],
[
1325,
1330
]
],
[
[
50,
56
]
],
[
[
64,
68
],
[
4846,
4850
],
[
4913,
4917
],
[
4994,
4998
],
[
327,
331
],
[
345,
349
],
[
359,
363
],
[
377,
381
],
[
2150,
2154
],
[
2233,
2237
],
[
2315,
2319
],
[
2398,
2402
]
],
[
[
86,
89
]
],
[
[
97,
105
],
[
3704,
3712
],
[
3852,
3860
]
],
[
[
113,
117
],
[
4487,
4491
]
],
[
[
125,
128
],
[
4800,
4803
],
[
1740,
1743
]
],
[
[
146,
150
]
],
[
[
152,
157
]
],
[
[
190,
196
],
[
4653,
4659
]
],
[
[
197,
198
],
[
2471,
2472
],
[
2673,
2674
],
[
2725,
2726
],
[
2903,
2904
],
[
3028,
3029
],
[
3054,
3055
],
[
3153,
3154
]
],
[
[
206,
220
],
[
3976,
3990
]
],
[
[
245,
251
],
[
4874,
4880
]
],
[
[
256,
261
],
[
338,
343
],
[
2162,
2167
],
[
2327,
2332
]
],
[
[
267,
272
],
[
370,
375
],
[
2245,
2250
],
[
2410,
2415
]
],
[
[
279,
284
],
[
477,
482
],
[
752,
757
],
[
973,
978
],
[
1257,
1262
],
[
1281,
1286
],
[
1442,
1447
],
[
1456,
1461
],
[
1470,
1475
],
[
1787,
1792
]
],
[
[
295,
305
],
[
4582,
4592
]
],
[
[
395,
399
],
[
1331,
1335
]
],
[
[
404,
409
],
[
2031,
2036
]
],
[
[
415,
419
],
[
1047,
1051
]
],
[
[
424,
428
],
[
827,
831
]
],
[
[
438,
449
],
[
832,
843
]
],
[
[
696,
707
],
[
1052,
1063
]
],
[
[
917,
928
],
[
1336,
1347
]
],
[
[
1138,
1147
],
[
4890,
4899
]
],
[
[
1937,
1955
],
[
561,
579
],
[
1509,
1527
]
],
[
[
2100,
2113
],
[
1011,
1024
]
],
[
[
2180,
2193
],
[
790,
803
]
],
[
[
2263,
2275
],
[
515,
527
],
[
1538,
1550
]
],
[
[
2346,
2358
],
[
538,
550
],
[
1561,
1573
]
],
[
[
2429,
2439
],
[
2983,
2993
]
],
[
[
2745,
2754
],
[
1601,
1610
],
[
3289,
3298
]
],
[
[
3001,
3013
],
[
2826,
2838
]
],
[
[
3081,
3101
],
[
590,
610
]
],
[
[
3307,
3325
],
[
3257,
3275
]
],
[
[
3623,
3646
],
[
3353,
3376
]
],
[
[
4041,
4056
],
[
3402,
3417
]
],
[
[
4143,
4155
],
[
4718,
4730
],
[
4764,
4776
],
[
1641,
1653
],
[
1700,
1712
],
[
2631,
2643
],
[
4076,
4088
]
],
[
[
4572,
4577
]
],
[
[
4649,
4650
],
[
2471,
2472
],
[
2673,
2674
],
[
2725,
2726
],
[
2903,
2904
],
[
3028,
3029
],
[
3054,
3055
],
[
3153,
3154
]
],
[
[
497,
502
]
],
[
[
621,
626
]
],
[
[
772,
777
]
],
[
[
814,
819
]
],
[
[
993,
998
]
],
[
[
1035,
1040
]
],
[
[
1301,
1306
]
],
[
[
1319,
1324
]
],
[
[
1491,
1496
]
]
] |
import numpy as np
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QComboBox, QDoubleSpinBox, QLabel
from ...layers.utils._color_manager_constants import ColorMode
from ...utils.translations import trans
from ..utils import qt_signals_blocked
from ..widgets.qt_color_swatch import QColorSwatchEdit
from .qt_layer_controls_base import QtLayerControls
class QtVectorsControls(QtLayerControls):
"""Qt view and controls for the napari Vectors layer.
Parameters
----------
layer : napari.layers.Vectors
An instance of a napari Vectors layer.
Attributes
----------
edge_color_label : qtpy.QtWidgets.QLabel
Label for edgeColorSwatch
edgeColorSwatch : qtpy.QtWidgets.QFrame
Color swatch showing display color of vectors.
edgeComboBox : qtpy.QtWidgets.QComboBox
Dropdown widget to select display color for vectors.
color_mode_comboBox : qtpy.QtWidgets.QComboBox
Dropdown widget to select edge_color_mode for the vectors.
color_prop_box : qtpy.QtWidgets.QComboBox
Dropdown widget to select _edge_color_property for the vectors.
edge_prop_label : qtpy.QtWidgets.QLabel
Label for color_prop_box
grid_layout : qtpy.QtWidgets.QGridLayout
Layout of Qt widget controls for the layer.
layer : napari.layers.Vectors
An instance of a napari Vectors layer.
lengthSpinBox : qtpy.QtWidgets.QDoubleSpinBox
Spin box widget controlling line length of vectors.
Multiplicative factor on projections for length of all vectors.
widthSpinBox : qtpy.QtWidgets.QDoubleSpinBox
Spin box widget controlling edge line width of vectors.
"""
def __init__(self, layer):
super().__init__(layer)
self.layer.events.edge_width.connect(self._on_edge_width_change)
self.layer.events.length.connect(self._on_length_change)
self.layer.events.edge_color_mode.connect(
self._on_edge_color_mode_change
)
self.layer.events.edge_color.connect(self._on_edge_color_change)
# dropdown to select the property for mapping edge_color
color_properties = self._get_property_values()
color_prop_box = QComboBox(self)
color_prop_box.activated[str].connect(self.change_edge_color_property)
color_prop_box.addItems(color_properties)
self.color_prop_box = color_prop_box
self.edge_prop_label = QLabel(trans._('edge property:'))
# vector direct color mode adjustment and widget
self.edgeColorEdit = QColorSwatchEdit(
initial_color=self.layer.edge_color,
tooltip=trans._(
'click to set current edge color',
),
)
self.edgeColorEdit.color_changed.connect(self.change_edge_color_direct)
self.edge_color_label = QLabel(trans._('edge color:'))
self._on_edge_color_change()
# dropdown to select the edge color mode
colorModeComboBox = QComboBox(self)
color_modes = [e.value for e in ColorMode]
colorModeComboBox.addItems(color_modes)
colorModeComboBox.activated[str].connect(self.change_edge_color_mode)
self.color_mode_comboBox = colorModeComboBox
self._on_edge_color_mode_change()
# line width in pixels
self.widthSpinBox = QDoubleSpinBox()
self.widthSpinBox.setKeyboardTracking(False)
self.widthSpinBox.setSingleStep(0.1)
self.widthSpinBox.setMinimum(0.1)
self.widthSpinBox.setMaximum(np.inf)
self.widthSpinBox.setValue(self.layer.edge_width)
self.widthSpinBox.valueChanged.connect(self.change_width)
# line length
self.lengthSpinBox = QDoubleSpinBox()
self.lengthSpinBox.setKeyboardTracking(False)
self.lengthSpinBox.setSingleStep(0.1)
self.lengthSpinBox.setValue(self.layer.length)
self.lengthSpinBox.setMinimum(0.1)
self.lengthSpinBox.setMaximum(np.inf)
self.lengthSpinBox.valueChanged.connect(self.change_length)
# grid_layout created in QtLayerControls
# addWidget(widget, row, column, [row_span, column_span])
self.grid_layout.addWidget(QLabel(trans._('opacity:')), 0, 0)
self.grid_layout.addWidget(self.opacitySlider, 0, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('width:')), 1, 0)
self.grid_layout.addWidget(self.widthSpinBox, 1, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('length:')), 2, 0)
self.grid_layout.addWidget(self.lengthSpinBox, 2, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('blending:')), 3, 0)
self.grid_layout.addWidget(self.blendComboBox, 3, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('edge color mode:')), 4, 0)
self.grid_layout.addWidget(self.color_mode_comboBox, 4, 1, 1, 2)
self.grid_layout.addWidget(self.edge_color_label, 5, 0)
self.grid_layout.addWidget(self.edgeColorEdit, 5, 1, 1, 2)
self.grid_layout.addWidget(self.edge_prop_label, 6, 0)
self.grid_layout.addWidget(self.color_prop_box, 6, 1, 1, 2)
self.grid_layout.setRowStretch(7, 1)
self.grid_layout.setColumnStretch(1, 1)
self.grid_layout.setSpacing(4)
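    # Layout note (added comment): rows 0-6 of grid_layout hold, in order, the opacity
    # slider, width spin box, length spin box, blending combo box, edge color mode
    # combo box, direct edge color swatch, and edge property dropdown.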
def change_edge_color_property(self, property: str):
"""Change edge_color_property of vectors on the layer model.
This property is the property the edge color is mapped to.
Parameters
----------
property : str
property to map the edge color to
"""
mode = self.layer.edge_color_mode
try:
self.layer.edge_color = property
self.layer.edge_color_mode = mode
except TypeError:
# if the selected property is the wrong type for the current color mode
# the color mode will be changed to the appropriate type, so we must update
self._on_edge_color_mode_change()
raise
def change_edge_color_mode(self, mode: str):
"""Change edge color mode of vectors on the layer model.
Parameters
----------
mode : str
Edge color for vectors. Must be: 'direct', 'cycle', or 'colormap'
"""
old_mode = self.layer.edge_color_mode
with self.layer.events.edge_color_mode.blocker():
try:
self.layer.edge_color_mode = mode
self._update_edge_color_gui(mode)
except ValueError:
# if the color mode was invalid, revert to the old mode
self.layer.edge_color_mode = old_mode
raise
def change_edge_color_direct(self, color: np.ndarray):
"""Change edge color of vectors on the layer model.
Parameters
----------
color : np.ndarray
Edge color for vectors, in an RGBA array
"""
self.layer.edge_color = color
def change_width(self, value):
"""Change edge line width of vectors on the layer model.
Parameters
----------
value : float
Line width of vectors.
"""
self.layer.edge_width = value
self.widthSpinBox.clearFocus()
self.setFocus()
def change_length(self, value):
"""Change length of vectors on the layer model.
Multiplicative factor on projections for length of all vectors.
Parameters
----------
value : float
Length of vectors.
"""
self.layer.length = value
self.lengthSpinBox.clearFocus()
self.setFocus()
def _update_edge_color_gui(self, mode: str):
"""Update the GUI element associated with edge_color.
This is typically used when edge_color_mode changes
Parameters
----------
mode : str
The new edge_color mode the GUI needs to be updated for.
Should be: 'direct', 'cycle', 'colormap'
"""
if mode in ('cycle', 'colormap'):
self.edgeColorEdit.setHidden(True)
self.edge_color_label.setHidden(True)
self.color_prop_box.setHidden(False)
self.edge_prop_label.setHidden(False)
elif mode == 'direct':
self.edgeColorEdit.setHidden(False)
self.edge_color_label.setHidden(False)
self.color_prop_box.setHidden(True)
self.edge_prop_label.setHidden(True)
def _get_property_values(self):
"""Get the current property values from the Vectors layer
Returns
-------
property_values : np.ndarray
array of all of the union of the property names (keys)
in Vectors.properties and Vectors._property_choices
"""
property_choices = [*self.layer._property_choices]
properties = [*self.layer.properties]
property_values = np.union1d(property_choices, properties)
return property_values
def _on_length_change(self):
"""Change length of vectors."""
with self.layer.events.length.blocker():
self.lengthSpinBox.setValue(self.layer.length)
def _on_edge_width_change(self):
"""Receive layer model width change event and update width spinbox."""
with self.layer.events.edge_width.blocker():
self.widthSpinBox.setValue(self.layer.edge_width)
def _on_edge_color_mode_change(self):
"""Receive layer model edge color mode change event & update dropdown."""
with qt_signals_blocked(self.color_mode_comboBox):
mode = self.layer._edge.color_mode
index = self.color_mode_comboBox.findText(
mode, Qt.MatchFixedString
)
self.color_mode_comboBox.setCurrentIndex(index)
self._update_edge_color_gui(mode)
def _on_edge_color_change(self):
"""Receive layer model edge color change event & update dropdown."""
if (
self.layer._edge.color_mode == ColorMode.DIRECT
and len(self.layer.data) > 0
):
with qt_signals_blocked(self.edgeColorEdit):
self.edgeColorEdit.setColor(self.layer.edge_color[0])
elif self.layer._edge.color_mode in (
ColorMode.CYCLE,
ColorMode.COLORMAP,
):
with qt_signals_blocked(self.color_prop_box):
prop = self.layer._edge.color_properties.name
index = self.color_prop_box.findText(prop, Qt.MatchFixedString)
self.color_prop_box.setCurrentIndex(index)
| [
[
[
7,
18
],
[
3520,
3522
],
[
3957,
3959
],
[
6672,
6674
],
[
8870,
8872
]
],
[
[
43,
45
],
[
9665,
9667
],
[
10471,
10473
]
],
[
[
73,
82
],
[
2206,
2215
],
[
2978,
2987
]
],
[
[
84,
98
],
[
3326,
3340
],
[
3704,
3718
]
],
[
[
100,
106
],
[
2427,
2433
],
[
2832,
2838
],
[
4184,
4190
],
[
4321,
4327
],
[
4455,
4461
],
[
4591,
4597
],
[
4729,
4735
]
],
[
[
161,
170
],
[
3034,
3043
],
[
9978,
9987
],
[
10232,
10241
],
[
10261,
10270
]
],
[
[
205,
210
],
[
2434,
2439
],
[
2635,
2640
],
[
2839,
2844
],
[
4191,
4196
],
[
4328,
4333
],
[
4462,
4467
],
[
4598,
4603
],
[
4736,
4741
]
],
[
[
231,
249
],
[
9495,
9513
],
[
10064,
10082
],
[
10309,
10327
]
],
[
[
288,
304
],
[
2548,
2564
]
],
[
[
341,
356
],
[
383,
398
]
],
[
[
365,
382
]
]
] |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.hardware_tapes_devices import HardwareTapesDevices # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestHardwareTapesDevices(unittest.TestCase):
"""HardwareTapesDevices unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHardwareTapesDevices(self):
"""Test HardwareTapesDevices"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.hardware_tapes_devices.HardwareTapesDevices() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
[
[
259,
274
]
],
[
[
283,
291
],
[
482,
490
],
[
928,
936
]
],
[
[
300,
313
]
],
[
[
370,
390
]
],
[
[
436,
448
]
],
[
[
457,
481
]
]
] |
from dash import dcc, html
from dash.dependencies import Input, Output
from app import app
from layouts import index, record, watch, replay, about
# from examples.run import callback_example
from callbacks.record import *
from callbacks.watch import *
from callbacks.replay import *
layout = html.Article([
    dcc.Location(id='url', refresh=False),  # mirrors the browser address bar
    html.Section(id='page-content'),  # page content container
])
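# Routing note (added comment): dcc.Location feeds the current pathname into the
# callback below, which swaps the children of 'page-content' to the matching layout.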
@app.callback(Output('page-content', 'children'),
Input('url', 'pathname'))
def display_page(pathname):
if pathname == '/':
return index.layout
if pathname == '/record':
return record.layout
if pathname == '/watch':
return watch.layout
if pathname == '/replay':
return replay.layout
if pathname == '/about':
return about.layout
# elif pathname.startswith('/examples/'):
# return callback_example(pathname)
# else:
# return '404'
app.config.suppress_callback_exceptions = True  # needed to support the multi-page app
if __name__ == '__main__':
import asyncio
from dash_xinet.server import run_server
port = 7777
# app.run_server(debug=True, port=5555, threaded=True)
# app.run_server(app, debug=True, port=5555, threaded=True)
run = run_server(app, layout,
port=port, debug=True
)
asyncio.run(run)
else:
app.layout = layout
server = app.server  # used for Dash server deployment
| [
[
[
17,
20
],
[
313,
316
]
],
[
[
22,
26
],
[
294,
298
],
[
365,
369
]
],
[
[
57,
62
],
[
475,
480
]
],
[
[
64,
70
],
[
425,
431
]
],
[
[
88,
91
],
[
412,
415
],
[
940,
943
],
[
1252,
1255
],
[
1362,
1365
],
[
1395,
1398
]
],
[
[
112,
117
],
[
568,
573
]
],
[
[
119,
125
],
[
626,
632
]
],
[
[
127,
132
],
[
684,
689
]
],
[
[
134,
140
],
[
742,
748
]
],
[
[
142,
147
],
[
800,
805
]
],
[
[
221,
222
]
],
[
[
251,
252
]
],
[
[
282,
283
]
],
[
[
285,
291
],
[
1257,
1263
],
[
1375,
1381
]
],
[
[
505,
517
]
],
[
[
1038,
1045
],
[
1335,
1342
]
],
[
[
1080,
1090
],
[
1241,
1251
]
],
[
[
1096,
1100
],
[
1291,
1295
]
],
[
[
1235,
1238
],
[
1347,
1350
]
],
[
[
1386,
1392
]
]
] |
import numpy as np
import pandas as pd
import os
import random
import math
from itertools import repeat
import itertools
import sys, copy, shutil
import subprocess
from multiprocessing.dummy import Pool
from collections import defaultdict
import copy
import random
import matplotlib.pyplot as plt
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
class ESTUNE:
"""
A class to parse neorl input template and construct cases for evolution strategy (ES) hyperparameter optimisation
inputs:
The template input file
Class object from PARSER.py, featuring user input for TUNE
neorl logo
"""
def __init__(self, tuneclass, inputfile, tuneblock, logo):
self.logo=logo
self.inputfile=inputfile
self.tuneblock=tuneblock
self.n_last_episodes=int(self.tuneblock["n_last_episodes"])
self.ncores=int(self.tuneblock["ncores"])
self.ncases=int(self.tuneblock["ncases"])
#---------------------------------------
# define genetic algorithm parameters
#---------------------------------------
self.popsize=10
if self.ncases < self.popsize:
self.ngens=1
else:
self.ngens=int(self.ncases/self.popsize)
self.MU=5
if tuneclass == 'gatune': # ES/GA tune
print("Performing semi-GA Tune")
self.INDPB=0.1
elif tuneclass == 'estune': # ES tune
print("Performing ES Tune")
self.INDPB=1.0
else: # default setting is ES tune
print("Performing ES Tune")
self.INDPB=1.0
self.CXPB=0.5
self.MUTPB=0.2
self.ETA=0.6
self.SMAX=0.5
self.paramvals=dict()
self.paraminds=dict()
self.datatypes=[]
#-------------------------------
# construct results directory
#-------------------------------
if os.path.exists('./tunecases/'):
shutil.rmtree('./tunecases/')
os.makedirs('./tunecases/', exist_ok=True)
else:
os.makedirs('./tunecases/', exist_ok=True)
self.csvlogger='tune.csv'
self.tunesummary='tunesummary.txt'
#---------------------------------
# parse the input template
#---------------------------------
with open (self.inputfile, 'r') as input_file_text:
self.template=input_file_text.readlines()
first=0; last=0
for i in range(len(self.template)):
if ('READ TUNE' in self.template[i]):
first=i
if ('END TUNE' in self.template[i]):
last=i
if first == 0 and last ==0:
            raise ValueError('TUNE card cannot be found')
del self.template[first: last+1]
self.template="".join(self.template)
def tune_count(self):
"""
        1- This function uses self.tuneblock, parses it, and infers all parameters to be tuned and their distributions
2- This function creates GA engine and instantiates the initial population for evolution algorithm
"""
self.param_dict={}
for item in self.tuneblock:
if '{' in item and '}' in item and item[0] != '#':
#-----------------------------------------------------
# check the existence of the name in the template
#-----------------------------------------------------
if item not in self.template:
raise ValueError('parameter {} in TUNE block cannot be found in any other block, e.g. DQN, GA, PPO, etc.'.format(item))
item_lst=self.tuneblock[item].split(",")
item_lst=[item.strip() for item in item_lst] # get rid of white spaces in the splitted values
#-------------------------------------------------------
# check if a uniform distribution of floats is identified
#-------------------------------------------------------
try:
if "float" in item_lst:
item_lst[0]=float(item_lst[0])
item_lst[1]=float(item_lst[1])
self.datatypes.append("float")
print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
elif "u" in item_lst:
item_lst[0]=float(item_lst[0])
item_lst[1]=float(item_lst[1])
self.datatypes.append("float")
print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
except:
raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --floats-- for {} according to (low, high, u) syntax'.format(item))
#---------------------------------------------------
# check if a random integer distribution is identified
#---------------------------------------------------
try:
if "int" in item_lst:
item_lst[0]=int(item_lst[0])
item_lst[1]=int(item_lst[1])
self.datatypes.append("int")
print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
elif "randint" in item_lst:
item_lst[0]=int(item_lst[0])
item_lst[1]=int(item_lst[1])
self.datatypes.append("int")
print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
except:
raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --int-- for {} according to (low, high, u) syntax'.format(item))
#-----------------------------------------------------
# check if a grid is identified
#-----------------------------------------------------
try:
if "grid" in item_lst:
element_lst=[]
for element in item_lst:
# check if it is an integer
not_int=0
try:
element_lst.append(int(element.strip()))
except Exception:
not_int=1
                            # else check if the element is a float
if not_int:
try:
element_lst.append(float(element.strip()))
# else consider it a string
except Exception:
element_lst.append(str(element.strip()))
item_lst=element_lst
self.datatypes.append("grid")
print ('-- debug: parameter {} has grid type with values {}'.format(item,item_lst))
except:
                    raise Exception ('--error: TUNE cannot construct the user-given grid for {} according to the comma-separated syntax'.format(item))
self.param_dict[item]=item_lst # Save the final parsed list for parameter {XXX}
#-----------------------------------------------------
# infer the bounds for strategy vector
#-----------------------------------------------------
if len(self.param_dict.keys()) <= 10:
self.SMIN=0.1
else:
self.SMIN=1/(len(self.param_dict.keys()))
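    # Illustrative example (added comment, based on the parsing rules in tune_count):
    # an entry self.tuneblock['{learning_rate}'] = '0.0001, 0.01, float' ends up as
    #   self.param_dict['{learning_rate}'] = [0.0001, 0.01, 'float']
    # while a grid entry such as '32, 64, 128, grid' becomes [32, 64, 128, 'grid']
    # (the 'grid' marker is stripped later in GenES before sampling).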
def gen_cases(self, x=0):
"""
This function infers neorl.py path
"""
self.tune_count()
self.param_names=list(self.param_dict.keys())
#-----------------------
# Infer neorl.py path
#-----------------------
# Find neorl path
#self.here=os.path.dirname(os.path.abspath(__file__))
#self.neorl_path=self.here.replace('src/tune','neorl.py') #try to infer neorl.py internally to call neorl inside or neorl
#self.python_path=self.here.replace('neorl/src/tune','anaconda3/bin/python3') #try to infer python3 path to call neorl inside or neorl
self.neorl_path=sys.argv[0]
self.python_path=sys.executable
print('--debug: NEORLPATH=', self.neorl_path)
print('--debug: PYTHONPATH=', self.python_path)
def GenES(self):
"""
Individual generator:
1- This function uses self.param_dict to obtain bounds for individual parameters
Returns:
            -ind (list): an individual vector with values sampled from the inferred distributions
-strategy (list): the strategy vector with values between smin and smax
"""
size=len(self.param_dict.keys()) # size of individual
content=[]
self.LOW=[] # Lower bounds for the parameters to be tuned
self.UP=[] # Upper bounds for parameters to be tuned
for key in list(self.param_dict.keys()):
if 'int' in self.param_dict[key]:
content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))
elif 'randint' in self.param_dict[key]:
content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))
elif 'float' in self.param_dict[key]:
content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))
elif 'u' in self.param_dict[key]:
content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))
elif 'grid' in self.param_dict[key]:
self.real_grid=list(self.param_dict[key])
self.real_grid.remove('grid') # get rid of the 'grid' to avoid sampling it
self.paramvals[key]=self.real_grid
content.append(random.sample(self.real_grid, 1)[0])
self.paraminds[len(content)-1]=key
else:
raise Exception('unknown data type is given, either int/randint, float/u, or grid are allowed for parameter distribution types')
self.LOW.append(self.param_dict[key][0])
self.UP.append(self.param_dict[key][1])
ind=list(content)
size = len(list(self.param_dict.keys()))
strategy= [random.uniform(self.SMIN, self.SMAX) for _ in range(size)]
return ind, strategy
def init_pop(self):
"""
Population initializer
Returns:
-pop (dict): initial population in a dictionary form
"""
# initialize the population and strategy and run them in parallel (these samples will be used to initialize the memory)
pop=defaultdict(list)
for i in range(self.popsize):
#caseid='es_gen{}_ind{}'.format(0,i+1)
data=self.GenES()
pop[i].append(data[0])
pop[i].append(data[1])
if self.ncores > 1: # evaluate warmup in parallel
core_list=[]
for key in pop:
caseid='ind{}'.format(key+1)
core_list.append([pop[key][0], caseid])
p=Pool(self.ncores)
fitness=p.map(self.gen_object, core_list)
p.close(); p.join()
[pop[ind].append(fitness[ind]) for ind in range(len(pop))]
else: # evaluate warmup in series
for key in pop:
caseid='ind{}'.format(key+1)
fitness=self.fit(pop[key][0], caseid)
pop[key].append(fitness)
return pop # return final pop dictionary with ind, strategy, and fitness
def fit(self, ind, caseid):
"""
This function evaluates an individual's fitness
Inputs:
-ind (list): an individual whose fitness to evaluate
-caseid (str): a string that specifies the given individual
Returns:
-mean_reward (float): fitness value
"""
try:
#---------------------------------------------
# Prepares directories and files for one case
# --------------------------------------------
self.param_names=list(self.param_dict.keys())
i = caseid[3:]
os.makedirs('./tunecases/case{}'.format(i), exist_ok=True)
self.new_template=copy.deepcopy(self.template)
for j in range (len(self.param_names)):
self.new_template=self.new_template.replace(str(self.param_names[j]), str(ind[j]))
filename='./tunecases/case{}/case{}.inp'.format(i, i)
with open (filename, 'w') as fout:
fout.writelines(self.new_template)
# copy external files into the new directory, if extfiles card exists
if 'extfiles' in self.tuneblock.keys():
if self.tuneblock['extfiles']:
print('--debug: external files are identified, copying them into each case directory')
for item in self.tuneblock['extfiles']:
os.system('cp -r {} ./tunecases/case{}/'.format(item, i))
casenum = caseid[3:]
print('--------------------------------------------------')
print('Running TUNE Case {}/{}: {}'.format(casenum, self.ncases, ind))
            subprocess.call([self.python_path, self.neorl_path, '-i', 'case{}.inp'.format(casenum)], cwd='./tunecases/case{}/'.format(casenum)) # this executes neorl for this case.inp
print('--------------------------------------------------')
#--------------------------------------------------------------------------------------------------------------
# Try to infer the _out.csv file in the directory since only one method is allowed
csvfile=[f for f in os.listdir('./tunecases/case{}/case{}_log/'.format(casenum, casenum)) if f.endswith('_out.csv')]
if len(csvfile) > 1:
raise Exception ('multiple *_out.csv files can be found in the logger of TUNE, only one is allowed')
#--------------------------------------------------------------------------------------------------------------
reward_lst=pd.read_csv('./tunecases/case{}/case{}_log/{}'.format(casenum,casenum, csvfile[0]), usecols=['reward']).values
mean_reward=np.mean(reward_lst[-self.n_last_episodes:])
max_reward=np.max(reward_lst)
with open (self.csvlogger, 'a') as fout:
fout.write(str(casenum) +',')
[fout.write(str(item) + ',') for item in ind]
fout.write(str(mean_reward) + ',' + str(max_reward) + '\n')
return mean_reward
except:
print('--error: case{}.inp failed during execution'.format(casenum))
return 'case{}.inp:failed'.format(casenum)
def gen_object(self, inp):
"""
This is a worker for the multiprocess Pool
Inputs:
-inp (list of lists): contains data for each core [[ind1, caseid1], ..., [indN, caseidN]]
Returns:
-fitness value (float)
"""
return self.fit(inp[0], inp[1])
def select(self, pop):
"""
Selection function sorts the population from max to min based on fitness and selects the k best
Inputs:
-pop (dict): population in dictionary structure
-k (int): top k individuals are selected
Returns:
            -best_dict (dict): the new ordered dictionary with the top k selected
"""
k=self.MU
pop=list(pop.items())
pop.sort(key=lambda e: e[1][2], reverse=True)
sorted_dict=dict(pop[:k])
# This block creates a new dict where keys are reset to 0 ... k in order to avoid unordered keys after sort
best_dict=defaultdict(list)
index=0
for key in sorted_dict:
best_dict[index].append(sorted_dict[key][0])
best_dict[index].append(sorted_dict[key][1])
best_dict[index].append(sorted_dict[key][2])
index+=1
sorted_dict.clear()
return best_dict
def cx(self, ind1, ind2, strat1, strat2):
"""
Executes a classical two points crossover on both the individuals and their strategy.
The individuals/strategies should be a list. The crossover points for the individual and the
strategy are the same.
Inputs:
-ind1 (list): The first individual participating in the crossover.
-ind2 (list): The second individual participating in the crossover.
-strat1 (list): The first evolution strategy participating in the crossover.
-strat2 (list): The second evolution strategy
Returns:
- The new ind1, ind2, strat1, strat2, after crossover in list form
"""
#for item in ind1:
# print('individual 1', type(item))
#for item in ind2:
# print('individual 2', type(item))
#for item in strat1:
# print('strategy 1', type(item))
#for item in strat2:
# print('strategy 2', type(item))
size = min(len(ind1), len(ind2))
pt1 = random.randint(1, size)
pt2 = random.randint(1, size-1)
if pt2 >= pt1:
pt2 +=1
else:
pt1, pt2 = pt2, pt1
ind1[pt1:pt2], ind2[pt1:pt2] = ind2[pt1:pt2], ind1[pt1:pt2]
strat1[pt1:pt2], strat2[pt1:pt2] = strat2[pt1:pt2], strat1[pt1:pt2]
return ind1, ind2, strat1, strat2
def mutES(self, ind, strat):
"""
Mutate an evolution strategy according to mixed Discrete/Continuous mutation rules
Input:
-ind (list): individual to be mutated
-strat (list): individual strategy to be mutated
Returns:
-ind (list): new individual after mutation
-strat (list): individual strategy after mutation
"""
size=len(ind)
tau=1/np.sqrt(2*size)
tau_prime=1/np.sqrt(2*np.sqrt(size))
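        # Added comment: this is the standard log-normal self-adaptation of ES,
        # sigma_i' = sigma_i * exp(tau*N(0,1) + tau'*N_i(0,1)) with tau = 1/sqrt(2n)
        # and tau' = 1/sqrt(2*sqrt(n)); for int/grid parameters the strategy is instead
        # updated through a logistic map and reflected back into [SMIN, SMAX], where it
        # acts as a per-parameter mutation probability (explicitly checked for grid).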
for i in range(size):
# Grid distribution received
if self.datatypes[i] == "grid":
#if i in self.paraminds.keys():
norm=random.gauss(0,1)
# modify the ind strategy
strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))
# make a transformation of strategy to ensure it is between smin, smax
y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)
if np.floor(y) % 2 == 0:
y_prime=np.abs(y-np.floor(y))
else:
y_prime=1-np.abs(y-np.floor(y))
strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime
# check if this attribute is mutated based on the updated strategy
if random.random() < strat[i]:
# make a list of possibilities after excluding the current value to enforce mutation
paramname=self.paraminds[i]
ind[i]=random.sample(self.paramvals[paramname], 1)[0]
# Random integer distribution received
elif self.datatypes[i] == "int":
norm=random.gauss(0,1)
# modify the ind strategy
strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))
# make a transformation of strategy to ensure it is between smin, smax
y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)
if np.floor(y) % 2 == 0:
y_prime=np.abs(y-np.floor(y))
else:
y_prime=1-np.abs(y-np.floor(y))
strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime
# check if this attribute is mutated based on the updated strategy
#if random.random() < strat[i]:
# make a list of possibilities after excluding the current value to enforce mutation
choices=list(range(self.LOW[i], self.UP[i]+1))
choices.remove(ind[i])
ind[i] = random.choice(choices)
# Uniform float distribution received
elif self.datatypes[i] == "float":
norm=random.gauss(0,1)
if random.random() < self.INDPB: # this indicates whether ind/strategy to be mutated or not for this float variable
strat[i] *= np.exp(tau*norm + tau_prime * random.gauss(0,1)) # normal mutation strategy
ind[i] += strat[i] * random.gauss(0,1) # update the individual position
                    # check if the new individual falls within the lower/upper boundaries
if ind[i] < self.LOW[i]:
ind[i] = self.LOW[i]
if ind[i] > self.UP[i]:
ind[i] = self.UP[i]
else:
raise Exception('ES mutation strategy works with int, float, or grid distributions, the type provided cannot be interpreted')
return ind, strat
def GenOffspring(self, pop):
"""
This function generates the offspring by applying crossover, mutation, OR reproduction.
Inputs:
-pop (dict): population in dictionary structure
Returns:
-offspring (dict): new modified population in dictionary structure
"""
pop_indices=list(range(0,len(pop)))
offspring=defaultdict(list)
for i in range(self.popsize):
alpha=random.random()
#----------------------
# Crossover
#----------------------
if alpha < self.CXPB:
index1, index2=random.sample(pop_indices,2)
ind1, ind2, strat1, strat2=self.cx(ind1=list(pop[index1][0]), ind2=list(pop[index2][0]),
strat1=list(pop[index1][1]), strat2=list(pop[index2][1]))
offspring[i].append(ind1)
offspring[i].append(strat1)
#print('crossover is done for sample {} between {} and {}'.format(i,index1,index2))
#----------------------
# Mutation
#----------------------
elif alpha < self.CXPB + self.MUTPB: # Apply mutation
index = random.choice(pop_indices)
ind, strat=self.mutES(ind=list(pop[index][0]), strat=list(pop[index][1]))
offspring[i].append(ind)
offspring[i].append(strat)
#print('mutation is done for sample {} based on {}'.format(i,index))
#------------------------------
# Reproduction from population
#------------------------------
else:
index=random.choice(pop_indices)
offspring[i].append(pop[index][0])
offspring[i].append(pop[index][1])
#print('reproduction is done for sample {} based on {}'.format(i,index))
return offspring
def run_cases(self):
"""
        This function runs the evolutionary algorithm over self.ngens generations.
"""
#------------------------------
# Begin the evolution process
#------------------------------
with open (self.csvlogger, 'w') as fout:
fout.write('caseid, ')
[fout.write(item + ',') for item in self.param_names]
fout.write('mean_reward,max_reward\n')
#print('PARAM dict', self.param_dict)
#print('PARAM types', self.datatypes)
self.population=self.init_pop()
case_idx=0
self.currentcase=self.popsize+1
for gen in range(1, self.ngens):
case_idx=0
caseids=['ind{}'.format(ind) for ind in range(self.currentcase, self.currentcase+self.popsize+1)]
# Vary the population and generate new offspring
offspring=self.GenOffspring(pop=self.population)
# Evaluate the individuals with invalid fitness using multiprocessing Pool
if self.ncores > 1:
core_list=[]
for key in offspring:
core_list.append([offspring[key][0], caseids[case_idx]])
case_idx+=1
# initialize a pool
p=Pool(self.ncores)
fitness=p.map(self.gen_object, core_list)
p.close(); p.join()
[offspring[ind].append(fitness[ind]) for ind in range(len(offspring))]
else:
for ind in range(len(offspring)):
fitness=self.fit(offspring[ind][0], caseids[case_idx])
case_idx+=1
offspring[ind].append(fitness)
self.currentcase+=self.popsize
# Select the next generation population
self.population = copy.deepcopy(self.select(pop=offspring))
csvdata=pd.read_csv('tune.csv')
asc_data=csvdata.sort_values(by=['caseid'],ascending=True)
des_data=csvdata.sort_values(by=['mean_reward'],ascending=False)
des_data2=csvdata.sort_values(by=['max_reward'],ascending=False)
asc_data.to_csv('tune.csv', index=False)
mean = np.mean(des_data.iloc[:,4:5])
totalmean=mean.tolist()[0]
try:
failed_cases=len([print ('failed') for item in self.population if isinstance(item, str)])
except:
failed_cases='NA'
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('Mean Rewards for all cases=', totalmean)
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print ('All TUNE CASES ARE COMPLETED')
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('--debug: Check tunesummary.txt file for best hyperparameters found')
print('--debug: Check tune.csv file for complete csv logger of all cases results')
print('--debug: Check tunecases directory for case-by-case detailed results')
with open ('tunesummary.txt', 'w') as fout:
fout.write(self.logo)
fout.write('*****************************************************\n')
fout.write('Summary for the TUNE case \n')
fout.write('*****************************************************\n')
fout.write('Number of cases evaluated: {} \n'.format(self.ncases))
fout.write('Number of failed cases: {} \n'.format(failed_cases))
fout.write('Parameter names: {} \n'.format(self.param_names))
fout.write('Parameter values: {} \n '.format(self.param_dict))
fout.write ('--------------------------------------------------------------------------------------\n')
if des_data.shape[0] < 20:
top=des_data.shape[0]
fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \n'.format(top))
fout.write(des_data.iloc[:top].to_string(index=False))
else:
top=20
fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \n'.format(top))
fout.write(des_data.iloc[:top].to_string(index=False))
fout.write ('\n')
fout.write ('--------------------------------------------------------------------------------------\n')
if des_data2.shape[0] < 20:
top=des_data2.shape[0]
fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \n'.format(top))
fout.write(des_data2.iloc[:top].to_string(index=False))
else:
top=20
fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \n'.format(top))
                fout.write(des_data2.iloc[:top].to_string(index=False))
| [
[
[
7,
18
],
[
14945,
14947
],
[
15012,
15014
],
[
18699,
18701
],
[
18735,
18737
],
[
18745,
18747
],
[
19063,
19065
],
[
19278,
19280
],
[
19328,
19330
],
[
19337,
19339
],
[
19402,
19404
],
[
19411,
19413
],
[
20084,
20086
],
[
20299,
20301
],
[
20349,
20351
],
[
20358,
20360
],
[
20423,
20425
],
[
20432,
20434
],
[
21199,
21201
],
[
26051,
26053
]
],
[
[
26,
38
],
[
14810,
14812
],
[
25749,
25751
]
],
[
[
46,
48
],
[
1959,
1961
],
[
2045,
2047
],
[
2114,
2116
],
[
12813,
12815
],
[
13648,
13650
],
[
14416,
14418
]
],
[
[
56,
62
]
],
[
[
70,
74
]
],
[
[
97,
103
]
],
[
[
111,
120
]
],
[
[
128,
131
],
[
8763,
8766
],
[
8800,
8803
]
],
[
[
133,
137
]
],
[
[
139,
145
],
[
2003,
2009
]
],
[
[
153,
163
],
[
13907,
13917
]
],
[
[
198,
202
],
[
11717,
11721
],
[
25116,
25120
]
],
[
[
227,
238
],
[
11266,
11277
],
[
16489,
16500
],
[
22235,
22246
]
],
[
[
246,
250
],
[
12902,
12906
],
[
25681,
25685
]
],
[
[
259,
265
],
[
9635,
9641
],
[
9784,
9790
],
[
9931,
9937
],
[
10074,
10080
],
[
10420,
10426
],
[
10870,
10876
],
[
17898,
17904
],
[
17936,
17942
],
[
18949,
18955
],
[
19090,
19096
],
[
19596,
19602
],
[
19804,
19810
],
[
19969,
19975
],
[
20111,
20117
],
[
20875,
20881
],
[
21017,
21023
],
[
21054,
21060
],
[
21229,
21235
],
[
21316,
21322
],
[
22309,
22315
],
[
22486,
22492
],
[
23106,
23112
],
[
23580,
23586
]
],
[
[
273,
297
]
],
[
[
337,
345
]
],
[
[
394,
402
]
],
[
[
410,
416
]
]
] |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
                      help='whether to use large image scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_argument('--input_dir', dest='input_dir',
                      help='directory to load models from',
type=str)
args = parser.parse_args()
return args
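# Illustrative invocation (added comment; the script name is a placeholder and the
# flags are only those defined above):
#   python test_net.py --dataset pascal_voc --net res101 --cuda \
#       --checksession 1 --checkepoch 1 --checkpoint 10021 --input_dir models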
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}/{}_ls.yml".format(args.dataset, args.net) if args.large_scale else "cfgs/{}/{}.yml".format(
args.dataset, args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.input_dir
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
  # initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
  # initialize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
_ = torch.from_numpy(np.tile(boxes, (1, scores.shape[1])))
pred_boxes = _.cuda() if args.cuda > 0 else _
pred_boxes /= data[1][0][2].item()
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
for j in xrange(1, imdb.num_classes):
inds = torch.nonzero(scores[:,j]>thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes[j][i] = cls_dets.cpu().numpy()
else:
all_boxes[j][i] = empty_array
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
if vis:
cv2.imwrite('result.png', im2show)
pdb.set_trace()
#cv2.imshow('test', im2show)
#cv2.waitKey(0)
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
end = time.time()
print("test time: %0.4fs" % (end - start))
| [
[
[
297,
312
]
],
[
[
336,
344
]
],
[
[
368,
382
]
],
[
[
391,
402
]
],
[
[
410,
412
],
[
5719,
5721
],
[
5849,
5851
],
[
8145,
8147
]
],
[
[
420,
423
],
[
11828,
11831
],
[
11958,
11961
]
],
[
[
431,
442
],
[
4043,
4045
],
[
8225,
8227
],
[
8238,
8240
],
[
9897,
9899
],
[
10284,
10286
],
[
11340,
11342
],
[
11526,
11528
],
[
11641,
11643
]
],
[
[
450,
458
],
[
1259,
1267
],
[
1922,
1930
]
],
[
[
466,
472
],
[
5463,
5469
]
],
[
[
480,
483
],
[
6539,
6542
],
[
12047,
12050
]
],
[
[
491,
495
],
[
7413,
7417
],
[
8098,
8102
],
[
8119,
8123
],
[
8585,
8589
],
[
10122,
10126
],
[
10189,
10193
],
[
11772,
11776
],
[
12313,
12317
]
],
[
[
504,
507
],
[
10230,
10233
],
[
12002,
12005
]
],
[
[
516,
521
],
[
3910,
3915
],
[
6651,
6656
],
[
6903,
6908
],
[
6936,
6941
],
[
6971,
6976
],
[
7004,
7009
],
[
7890,
7895
],
[
9179,
9184
],
[
9266,
9271
],
[
9450,
9455
],
[
9537,
9542
],
[
9880,
9885
],
[
10357,
10362
],
[
10525,
10530
],
[
10746,
10751
]
],
[
[
549,
557
],
[
7212,
7220
],
[
7242,
7250
],
[
7274,
7282
],
[
7307,
7315
]
],
[
[
565,
579
]
],
[
[
587,
607
]
],
[
[
615,
621
],
[
12169,
12175
],
[
12195,
12201
]
],
[
[
655,
669
],
[
5556,
5570
]
],
[
[
712,
726
],
[
7746,
7760
]
],
[
[
758,
761
],
[
3717,
3720
],
[
3752,
3755
],
[
3786,
3789
],
[
4058,
4061
],
[
5477,
5480
],
[
5485,
5488
],
[
6769,
6772
],
[
7347,
7350
],
[
8851,
8854
],
[
8970,
8973
],
[
9197,
9200
],
[
9284,
9287
],
[
9468,
9471
],
[
9555,
9558
],
[
10932,
10935
]
],
[
[
763,
776
],
[
5341,
5354
]
],
[
[
778,
791
],
[
5406,
5419
]
],
[
[
793,
807
],
[
7702,
7716
]
],
[
[
845,
855
],
[
9757,
9767
]
],
[
[
890,
893
],
[
10918,
10921
]
],
[
[
931,
949
],
[
9693,
9711
]
],
[
[
984,
992
]
],
[
[
994,
1002
]
],
[
[
1004,
1018
],
[
11044,
11058
]
],
[
[
1056,
1061
],
[
6041,
6046
]
],
[
[
1099,
1105
],
[
6161,
6167
],
[
6286,
6292
],
[
6411,
6417
]
],
[
[
1165,
1171
],
[
7660,
7666
],
[
7616,
7622
],
[
10311,
10317
],
[
11419,
11425
],
[
11587,
11593
]
],
[
[
1198,
1208
],
[
3847,
3857
]
],
[
[
3712,
3714
]
],
[
[
3741,
3749
]
],
[
[
3771,
3783
]
],
[
[
3840,
3844
],
[
3898,
3902
],
[
3944,
3948
],
[
4077,
4081
],
[
4113,
4117
],
[
4156,
4160
],
[
4198,
4202
],
[
4284,
4288
],
[
4325,
4329
],
[
4386,
4390
],
[
4428,
4432
],
[
4514,
4518
],
[
4544,
4548
],
[
4611,
4615
],
[
4657,
4661
],
[
4746,
4750
],
[
4780,
4784
],
[
4820,
4824
],
[
4861,
4865
],
[
4947,
4951
],
[
4975,
4979
],
[
5023,
5027
],
[
5072,
5076
],
[
5227,
5231
],
[
5200,
5204
],
[
5214,
5218
],
[
5280,
5284
],
[
5294,
5298
],
[
5157,
5161
],
[
5310,
5314
],
[
5355,
5359
],
[
5375,
5379
],
[
5420,
5424
],
[
5571,
5575
],
[
5695,
5699
],
[
5911,
5915
],
[
5930,
5934
],
[
5947,
5951
],
[
6003,
6007
],
[
6094,
6098
],
[
6122,
6126
],
[
6220,
6224
],
[
6248,
6252
],
[
6344,
6348
],
[
6372,
6376
],
[
6470,
6474
],
[
7048,
7052
],
[
7332,
7336
],
[
7369,
7373
],
[
7456,
7460
],
[
9104,
9108
],
[
9970,
9974
],
[
10572,
10576
]
],
[
[
5517,
5521
],
[
5599,
5603
],
[
6047,
6051
],
[
6168,
6172
],
[
6293,
6297
],
[
6418,
6422
],
[
7570,
7574
],
[
7667,
7671
],
[
7717,
7721
],
[
7822,
7826
],
[
9654,
9658
],
[
10241,
10245
],
[
10321,
10325
],
[
11068,
11072
],
[
11429,
11433
],
[
11597,
11601
],
[
12256,
12260
]
],
[
[
5523,
5528
],
[
5671,
5676
],
[
7761,
7766
]
],
[
[
5530,
5540
],
[
7768,
7778
]
],
[
[
5542,
5553
],
[
7780,
7791
]
],
[
[
5683,
5692
],
[
5734,
5743
],
[
5824,
5833
],
[
5862,
5871
]
],
[
[
5837,
5846
],
[
6624,
6633
],
[
6662,
6671
]
],
[
[
6028,
6038
],
[
6558,
6568
],
[
6675,
6685
],
[
7384,
7394
],
[
8191,
8201
],
[
8726,
8736
]
],
[
[
6148,
6158
],
[
6558,
6568
],
[
6675,
6685
],
[
7384,
7394
],
[
8191,
8201
],
[
8726,
8736
]
],
[
[
6273,
6283
],
[
6558,
6568
],
[
6675,
6685
],
[
7384,
7394
],
[
8191,
8201
],
[
8726,
8736
]
],
[
[
6398,
6408
],
[
6558,
6568
],
[
6675,
6685
],
[
7384,
7394
],
[
8191,
8201
],
[
8726,
8736
]
],
[
[
6638,
6648
],
[
6702,
6712
],
[
6746,
6756
],
[
6788,
6798
]
],
[
[
6893,
6900
],
[
7073,
7080
],
[
7221,
7228
]
],
[
[
6926,
6933
],
[
7102,
7109
],
[
7251,
7258
]
],
[
[
6959,
6968
],
[
7133,
7142
],
[
7283,
7292
]
],
[
[
6993,
7001
],
[
7165,
7173
],
[
7316,
7324
]
],
[
[
7063,
7070
],
[
7221,
7228
]
],
[
[
7092,
7099
],
[
7251,
7258
]
],
[
[
7121,
7130
],
[
7283,
7292
]
],
[
[
7154,
7162
],
[
7316,
7324
]
],
[
[
7202,
7209
],
[
8339,
8346
],
[
8737,
8744
]
],
[
[
7232,
7239
],
[
8397,
8404
],
[
8746,
8753
],
[
9780,
9787
]
],
[
[
7262,
7271
],
[
8514,
8523
],
[
8765,
8774
]
],
[
[
7296,
7304
],
[
8455,
8463
],
[
8755,
8763
]
],
[
[
7405,
7410
],
[
12362,
12367
]
],
[
[
7427,
7440
],
[
11296,
11309
],
[
11482,
11495
],
[
11549,
11562
]
],
[
[
7450,
7453
],
[
7471,
7474
],
[
10210,
10213
],
[
11015,
11018
],
[
11987,
11990
]
],
[
[
7480,
7486
],
[
10383,
10389
]
],
[
[
7506,
7512
],
[
10383,
10389
]
],
[
[
7522,
7531
],
[
7723,
7732
]
],
[
[
7553,
7563
],
[
7623,
7633
],
[
8290,
8300
],
[
11916,
11926
]
],
[
[
7590,
7599
],
[
11126,
11135
],
[
11195,
11204
],
[
11351,
11360
],
[
11650,
11659
],
[
11729,
11738
],
[
11711,
11720
],
[
12181,
12190
],
[
12281,
12290
]
],
[
[
7689,
7699
],
[
8158,
8168
],
[
12292,
12302
]
],
[
[
7736,
7743
],
[
7918,
7925
]
],
[
[
7877,
7887
],
[
8064,
8074
]
],
[
[
8047,
8056
],
[
8322,
8331
]
],
[
[
8079,
8081
]
],
[
[
8134,
8142
],
[
12141,
12149
]
],
[
[
8211,
8222
],
[
11213,
11224
]
],
[
[
8279,
8280
],
[
10260,
10261
],
[
11139,
11140
],
[
11208,
11209
],
[
11364,
11365
],
[
11663,
11664
],
[
11742,
11743
],
[
11724,
11725
],
[
11909,
11910
]
],
[
[
8310,
8314
],
[
8360,
8364
],
[
8382,
8386
],
[
8418,
8422
],
[
8440,
8444
],
[
8477,
8481
],
[
8499,
8503
],
[
8537,
8541
],
[
8559,
8563
],
[
10012,
10016
]
],
[
[
8575,
8582
],
[
10164,
10171
]
],
[
[
8603,
8607
],
[
8820,
8824
]
],
[
[
8609,
8617
],
[
8792,
8800
]
],
[
[
8619,
8628
],
[
8942,
8951
]
],
[
[
8638,
8650
]
],
[
[
8652,
8664
]
],
[
[
8674,
8687
]
],
[
[
8689,
8703
]
],
[
[
8713,
8723
]
],
[
[
8783,
8789
],
[
9916,
9922
],
[
10049,
10055
]
],
[
[
8812,
8817
],
[
9712,
9717
],
[
9905,
9910
]
],
[
[
8929,
8939
],
[
9154,
9164
],
[
9425,
9435
],
[
9719,
9729
]
],
[
[
9141,
9151
],
[
9352,
9362
]
],
[
[
9339,
9349
],
[
9719,
9729
]
],
[
[
9412,
9422
],
[
9623,
9633
]
],
[
[
9610,
9620
],
[
9719,
9729
]
],
[
[
9680,
9690
],
[
9768,
9778
]
],
[
[
9744,
9754
],
[
9998,
10008
]
],
[
[
9876,
9877
],
[
9958,
9959
],
[
9989,
9990
]
],
[
[
9945,
9955
],
[
9998,
10008
]
],
[
[
10040,
10046
],
[
10371,
10377
],
[
10484,
10490
]
],
[
[
10072,
10082
],
[
10619,
10629
],
[
10683,
10693
]
],
[
[
10112,
10119
],
[
10154,
10161
]
],
[
[
10140,
10151
],
[
11928,
11939
]
],
[
[
10178,
10186
],
[
11812,
11820
]
],
[
[
10225,
10227
],
[
10292,
10294
]
],
[
[
10274,
10281
],
[
11059,
11066
],
[
12028,
12035
]
],
[
[
10306,
10307
],
[
10380,
10381
],
[
10493,
10494
],
[
10703,
10704
],
[
10710,
10711
],
[
11081,
11082
],
[
11136,
11137
],
[
11205,
11206
]
],
[
[
10350,
10354
],
[
10441,
10445
],
[
10496,
10500
],
[
10630,
10634
],
[
10694,
10698
]
],
[
[
10471,
10481
],
[
10536,
10546
],
[
10768,
10778
]
],
[
[
10514,
10515
]
],
[
[
10517,
10522
],
[
10892,
10897
]
],
[
[
10607,
10616
],
[
10757,
10766
]
],
[
[
10671,
10680
],
[
10757,
10766
]
],
[
[
10735,
10743
],
[
10883,
10891
]
],
[
[
10872,
10880
],
[
10922,
10930
],
[
10969,
10977
]
],
[
[
10911,
10915
],
[
10978,
10982
]
],
[
[
10958,
10966
],
[
11085,
11093
],
[
11144,
11152
]
],
[
[
11034,
11041
],
[
11059,
11066
],
[
12028,
12035
]
],
[
[
11325,
11337
],
[
11466,
11478
],
[
11534,
11546
]
],
[
[
11511,
11523
],
[
11676,
11688
]
],
[
[
11582,
11583
],
[
11660,
11661
],
[
11739,
11740
],
[
11721,
11722
]
],
[
[
11634,
11638
],
[
11745,
11749
]
],
[
[
11761,
11769
],
[
11801,
11809
]
],
[
[
11790,
11798
],
[
11941,
11949
]
],
[
[
12160,
12161
],
[
12192,
12193
]
],
[
[
12307,
12310
],
[
12356,
12359
]
]
] |
import click
from flask.cli import FlaskGroup
from . import create_app
@click.group(cls=FlaskGroup, create_app=create_app)
def main():
"""Management script for the python_project_template application."""
if __name__ == "__main__": # pragma: no cover
main()
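# Usage sketch (assumption: this module is exposed as a console script or run directly;
# the script name below is illustrative). FlaskGroup wraps the standard Flask CLI around
# create_app(), so the usual commands become available, e.g.:
#   python -m python_project_template run
#   python -m python_project_template shell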
| [
[
[
7,
12
],
[
74,
79
]
],
[
[
35,
45
],
[
90,
100
]
],
[
[
60,
70
],
[
113,
123
]
],
[
[
129,
133
],
[
263,
267
]
]
] |
import pytorch_lightning as pl
from loss.loss import get_loss
from optimizer.optimizer import get_optimizer
from scheduler.scheduler import get_scheduler
import torch
import numpy as np
from pytorch_lightning.metrics import Accuracy
import segmentation_models_pytorch as smp
from utils.utils import load_obj
import albumentations as A
from utils.preprocessing import *
import shutil
class LitClassifier(pl.LightningModule):
def __init__(self, hparams, model):
super().__init__()
self.save_hyperparameters(hparams)
self.model = model
self.criteria = get_loss(hparams.training.loss)
#self.accuracy = Accuracy()
self.dice = smp.utils.losses.DiceLoss(activation='sigmoid')
def forward(self, x):
# use forward for inference/predictions
return self.model(x)
def configure_optimizers(self):
optimizer = get_optimizer(self.model.parameters(), self.hparams.training.optimizer)
scheduler = get_scheduler(optimizer, self.hparams.training.scheduler)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
x, y = batch
if self.hparams.dataset.mixup:
num_batch = self.hparams.dataset.batch_size
alpha = 0.2
#rnd = torch.from_numpy(np.random.beta(alpha,alpha,int(num_batch/2))).type_as(x)
#rnd = rnd.reshape(int(num_batch/2), 1, 1, 1)
#x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)
#y = y[:int(num_batch/2)]*rnd + y[int(num_batch/2):]*(1-rnd)
rnd = torch.from_numpy(np.random.beta(alpha,alpha,1)).type_as(x)
x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)
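            # Mixup (Zhang et al., 2018): a single lambda ~ Beta(alpha, alpha) blends the
            # first half of the batch with the second half; the loss below applies the
            # same convex combination to the two sets of targets.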
y_hat = self.model(x)
if self.hparams.dataset.mixup:
loss = self.criteria(y_hat, y[:int(num_batch/2)])*rnd + self.criteria(y_hat, y[int(num_batch/2):])*(1-rnd)
else:
loss = self.criteria(y_hat, y)
self.log('train_loss', loss, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model(x)
loss = self.criteria(y_hat, y)
dice = 1-self.dice(y_hat, y)
#self.log('val_loss', loss)
#self.log('val_dice', dice)
return {
"val_loss": loss,
"val_dice": dice
}
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
avg_val_dice = torch.stack([x["val_dice"] for x in outputs]).mean()
self.log('val_loss', avg_val_loss)
self.log('val_dice', avg_val_dice)
#y = torch.cat([x["y"] for x in outputs]).cpu()
#y_hat = torch.cat([x["y_hat"] for x in outputs]).cpu()
#preds = np.argmax(y_hat, axis=1)
#val_accuracy = self.accuracy(y, preds)
#self.log('avg_val_loss', avg_val_loss)
#self.log('val_acc', val_accuracy)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model(x)
loss = self.criteria(y_hat, y)
self.log('test_loss', loss)
| [
[
[
7,
30
],
[
408,
410
]
],
[
[
53,
61
],
[
590,
598
]
],
[
[
94,
107
],
[
888,
901
]
],
[
[
140,
153
],
[
981,
994
]
],
[
[
162,
167
],
[
1591,
1596
],
[
2453,
2458
],
[
2529,
2534
]
],
[
[
175,
186
],
[
1608,
1610
]
],
[
[
225,
233
]
],
[
[
241,
275
],
[
679,
682
]
],
[
[
301,
309
]
],
[
[
317,
336
]
],
[
[
369,
370
]
],
[
[
378,
384
]
],
[
[
394,
407
]
]
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
# Regio Midden
(2406, 'Alphen a/d Rijn'),
(2515, 'Den Haag'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
# Regio Zuid
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
"""Return list of Timestamps with bad scan times, from CSV data."""
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = pd.to_datetime(df['Timestamp']).to_list()
return tstamps
def _mean_time(ts_list):
"""Return mean timestamp value from list of timestamps."""
ts0 = ts_list[0]
delta_sum = pd.Timedelta(0)
for ts in ts_list:
delta_sum += (ts -ts0)
ts_mean = ts0 + delta_sum / len(ts_list)
return ts_mean
def _delta_time_hhmm(hm):
"""Convert 'hh:mm' string to TimeDelta."""
return pd.Timedelta(f'{hm}:00')
def _summary_to_scores(summary):
"""Convert summary from _read_log to scores dict and effective timestamp.
Parameters:
- summary: dict with int(pc4) -> [(query_time, appt_time), ...]
Return:
- scores dict: int(pc4) -> score (int or float or '?')
- timestamp: middle query timestamp of this run.
"""
# Convert to number codes.
scores = {k: '?' for k in PCODES}
multi_pcs = {} # pc4 -> (pc4[0], pc4[1], ...)
for pc in PCODES:
if isinstance(pc, tuple):
for pc1 in pc:
multi_pcs[pc1] = pc
qtms = []
dhm = _delta_time_hhmm
for pc4, vlist in summary.items():
pc4 = int(pc4)
if pc4 not in scores:
if pc4 in multi_pcs:
pc4_key = multi_pcs[pc4]
else:
print(f'{pc4} not in list...')
continue
else:
pc4_key = pc4
if len(vlist) == 0:
scores[pc4_key] = 7
continue
qtm = _mean_time([v[0] for v in vlist]) # query time
qtms.append(qtm)
atm = min(v[1] for v in vlist) # earliest appointment time
qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))
thresholds = [
(3, qtm_00 + dhm('23:59')),
(4, qtm + dhm('24:00')),
(5, qtm_00 + dhm('48:00')),
(6, qtm + dhm('48:00')),
(6.3, qtm_00 + dhm('72:00')),
(6.7, qtm + dhm('72:00')),
(7, atm)
]
if qtm.hour < 9:
thresholds.insert(0, (1, qtm_00 + dhm('13:00')))
elif qtm.hour < 13:
thresholds.insert(0, (1, qtm + dhm('4:00')))
elif qtm.hour < 17:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm + dhm('20:00')))
else:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm_00 + dhm('37:00')))
for s, tm in thresholds:
if atm < tm:
scores[pc4_key] = s
break
if len(qtms) == 0:
qtm_mid = pd.Timestamp(None)
else:
qtm_min = min(qtms)
qtm_mid = qtm_min + (max(qtms) - qtm_min)/2
return scores, qtm_mid
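# Worked example (hypothetical scan): for a query at 10:00 the 9:00-13:00 branch above
# inserts the threshold (1, qtm + 4:00); an earliest appointment at 12:00 then satisfies
# atm < 14:00, so that postcode scores 1.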
def _get_min_wait(summary):
"""Return minimum and median wait Timedelta between scan time and appointment.
summary is dict of pc4 -> list of timestamps
No data -> 999 h.
For the median, NaT is counted as infinite.
"""
wtimes = []
for _, vlist in summary.items():
wtimes_this = [atm - qtm for qtm, atm in vlist]
wtimes.append(
min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')
)
minwait = min(wtimes) if wtimes else 999
medwait = pd.Timedelta(np.median(wtimes))
return minwait, medwait
def load_csv(csv_fname):
"""Return DataFrame and list of start times (+1)."""
df = pd.read_csv(csv_fname, comment='#')
df['req_pc4'] = df['req_pc4'].astype(int)
for c in df.columns:
if c.endswith('_time') or c.endswith('_date'):
df[c] = pd.to_datetime(df[c])
else:
df.loc[df[c].isna(), c] = None
# start_tms: list of scan start times (plus one extra at the end)
start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']
start_tms = [df.iloc[0]['scan_time']] + list(start_tms)
start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]
return df, start_tms
def load_multi_csvs(csv_fnames):
"""Return DataFrame and list of start times (+1)"""
dfs = []
start_tms = []
for f in csv_fnames:
df, st = load_csv(f)
dfs.append(df)
start_tms.extend(st[:-1])
df = pd.concat(dfs).reset_index()
start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))
return df, start_tms
def get_scan_scores(df, tm_range):
"""Get scan scores as pc4 -> score dict.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_range: (tm_start, tm_stop) timestamps.
Return:
- tstamp: timestamp of the scan (mid-point)
- scores: dict of pc4->score
- min_wait: Timedelta of minimum wait time from scan to appointment
"""
mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])
df1 = df.loc[mask]
summary = {}
for pc4, city_re in PCODES.items():
pc4_tup = (pc4,) if isinstance(pc4, int) else pc4
options = []
req_pc4 = None
for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():
req_pc4 = int(row['req_pc4'])
for i in range(3):
addr = row[f'opt{i}_short_addr']
if addr and re.match(f'{city_re}$', addr[5:]):
options.append((row['scan_time'], row[f'opt{i}_time']))
if req_pc4 is not None:
summary[req_pc4] = options
scores, tstamp = _summary_to_scores(summary)
if pd.isna(tstamp):
tstamp = df1.iloc[len(df1)//2]['scan_time']
minwait, medwait = _get_min_wait(summary)
if medwait == 999:
medwait = pd.Timedelta(None)
return tstamp, scores, minwait, medwait
def get_scan_scores_df(df, tm_ranges, decimal_comma=True):
"""Get scan scores as dataframe, from csv dataframe.
Blacklisted scan times are dropped.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_ranges: list of timestamps (+one at the end) with boundaries
of timestamp ranges.
- decimal_comma: True to have string values 6,3 rather than float 6.3.
Return:
- Dataframe with scores, date_str, time_str, pc4, min_wait, med_wait as columns.
"""
n = len(tm_ranges)
records = []
index = []
minwait_hs = []
medwait_hs = []
bad_stimes = get_bad_scan_times()
for i in range(n-1):
tm_ra = tm_ranges[i:i+2]
is_ok = True
for tm in bad_stimes:
if tm_ra[0] <= tm < tm_ra[1]:
is_ok = False
break
if not is_ok:
print(f'Dropped scan at {tm_ra[0].strftime("%Y-%m-%d %H:%M")}')
continue
tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)
records.append(scores)
index.append(tm)
minwait_hs.append(minwait.total_seconds() / 3600)
medwait_hs.append(medwait.total_seconds() / 3600)
dates = [t.strftime('%Y-%m-%d') for t in index]
times = [t.strftime('%H:%M') for t in index]
sdf = pd.DataFrame.from_records(records)
sdf.insert(0, 'Time', times)
sdf.insert(0, 'Date', dates)
sdf['min_wait_h'] = np.around(minwait_hs, 2)
sdf['med_wait_h'] = np.around(medwait_hs, 2)
sdf.loc[sdf['min_wait_h'].isna(), 'min_wait_h'] = 999
sdf.columns = [
('/'.join([str(x) for x in c]) if isinstance(c, tuple) else c)
for c in sdf.columns
]
if decimal_comma:
for c in sdf.columns[2:]:
sdf[c] = sdf[c].astype(str)
sdf[c] = sdf[c].str.replace('.', ',', regex=False)
            sdf[c] = sdf[c].str.replace(',0$', '', regex=True)
sdf[c] = sdf[c].str.replace('?', '', regex=False)
return sdf
if __name__ == '__main__':
in_spyder = ('SPYDER_ARGS' in os.environ)
csv_fnames = sorted(Path('data-ggd').glob('ggd_scan-????-W??.csv'))
do_all = ('--all' in sys.argv)
do_all = do_all or in_spyder and input('(A)ll or latest?').lower() == 'a'
if do_all:
df, start_tms = load_multi_csvs(csv_fnames)
sdf = get_scan_scores_df(df, start_tms).iloc[::-1]
else:
df, start_tms = load_csv(csv_fnames[-1])
sdf = get_scan_scores_df(df, start_tms[-2:])
print(sdf)
if len(sdf) > 1:
sdf.to_clipboard(index=False)
print('Copied to clipboard including headers')
elif len(sdf) == 1:
sdf.iloc[[0], 2:].to_clipboard(header=False, index=False)
print('Copied to clipboard, scores only.')
else:
print('No output.')
if not in_spyder:
# Note: in Spyder, copy/paste will stall while input is blocked.
input('Press Enter to quit and clear clipboard.')
| [
[
[
154,
158
],
[
9067,
9071
]
],
[
[
166,
168
],
[
9031,
9033
]
],
[
[
176,
178
],
[
6454,
6456
]
],
[
[
186,
189
],
[
9140,
9143
]
],
[
[
197,
209
],
[
1184,
1186
],
[
1253,
1255
],
[
1440,
1442
],
[
1660,
1662
],
[
2847,
2849
],
[
3797,
3799
],
[
4356,
4358
],
[
4451,
4453
],
[
4604,
4606
],
[
4787,
4789
],
[
4985,
4987
],
[
5127,
5129
],
[
5417,
5419
],
[
5494,
5496
],
[
6692,
6694
],
[
6848,
6850
],
[
8279,
8281
]
],
[
[
217,
228
],
[
4464,
4466
],
[
8404,
8406
],
[
8453,
8455
]
],
[
[
230,
236
],
[
2080,
2086
],
[
2153,
2159
],
[
6114,
6120
]
],
[
[
1081,
1099
],
[
7586,
7604
]
],
[
[
1319,
1329
],
[
2691,
2701
]
],
[
[
1580,
1596
],
[
2283,
2299
]
],
[
[
1691,
1709
],
[
6657,
6675
]
],
[
[
3939,
3952
],
[
6784,
6797
]
],
[
[
4517,
4525
],
[
9388,
9396
],
[
5339,
5347
]
],
[
[
5180,
5195
],
[
9267,
9282
]
],
[
[
5548,
5563
],
[
7968,
7983
]
],
[
[
6917,
6935
],
[
9309,
9327
],
[
9427,
9445
]
],
[
[
9001,
9010
],
[
9173,
9182
],
[
9786,
9795
]
],
[
[
9047,
9057
],
[
9283,
9293
],
[
9397,
9407
]
],
[
[
9119,
9125
],
[
9163,
9169
]
],
[
[
9154,
9160
],
[
9235,
9241
]
],
[
[
9251,
9253
],
[
9328,
9330
]
],
[
[
9255,
9264
],
[
9332,
9341
]
],
[
[
9303,
9306
],
[
9476,
9479
],
[
9492,
9495
],
[
9510,
9513
],
[
9608,
9611
],
[
9627,
9630
]
],
[
[
9372,
9374
],
[
9446,
9448
]
],
[
[
9376,
9385
],
[
9450,
9459
]
],
[
[
9421,
9424
],
[
9476,
9479
],
[
9492,
9495
],
[
9510,
9513
],
[
9608,
9611
],
[
9627,
9630
]
]
] |
# -*- coding: utf-8 -*-
"""Loan Qualifier Application.
This is a command line application to match applicants with qualifying loans.
Example:
$ python app.py
"""
import sys
import fire
import questionary
from pathlib import Path
import csv
from qualifier.utils.fileio import (
load_csv,
save_csv,
)
from qualifier.utils.calculators import (
calculate_monthly_debt_ratio,
calculate_loan_to_value_ratio,
)
from qualifier.filters.max_loan_size import filter_max_loan_size
from qualifier.filters.credit_score import filter_credit_score
from qualifier.filters.debt_to_income import filter_debt_to_income
from qualifier.filters.loan_to_value import filter_loan_to_value
def load_bank_data():
"""Ask for the file path to the latest banking data and load the CSV file.
Returns:
The bank data from the data rate sheet CSV file.
"""
csvpath = questionary.text("Enter a file path to a rate-sheet (.csv):").ask()
csvpath = Path(csvpath)
if not csvpath.exists():
sys.exit(f"Oops! Can't find this path: {csvpath}")
return load_csv(csvpath)
def get_applicant_info():
"""Prompt dialog to get the applicant's financial information.
Returns:
Returns the applicant's financial information.
"""
credit_score = questionary.text("What's your credit score?").ask()
debt = questionary.text("What's your current amount of monthly debt?").ask()
income = questionary.text("What's your total monthly income?").ask()
loan_amount = questionary.text("What's your desired loan amount?").ask()
home_value = questionary.text("What's your home value?").ask()
credit_score = int(credit_score)
debt = float(debt)
income = float(income)
loan_amount = float(loan_amount)
home_value = float(home_value)
return credit_score, debt, income, loan_amount, home_value
def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value):
"""Determine which loans the user qualifies for.
    Loan qualification criteria are based on:
    - Credit Score
    - Loan Size
    - Debt to Income ratio (calculated)
- Loan to Value ratio (calculated)
Args:
bank_data (list): A list of bank data.
credit_score (int): The applicant's current credit score.
debt (float): The applicant's total monthly debt payments.
income (float): The applicant's total monthly income.
loan (float): The total loan amount applied for.
home_value (float): The estimated home value.
Returns:
A list of the banks willing to underwrite the loan.
"""
# Calculate the monthly debt ratio
monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income)
print(f"The monthly debt to income ratio is {monthly_debt_ratio:.02f}")
# Calculate loan to value ratio
loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value)
print(f"The loan to value ratio is {loan_to_value_ratio:.02f}.")
# Run qualification filters
bank_data_filtered = filter_max_loan_size(loan, bank_data)
bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered)
bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)
bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)
print(f"Found {len(bank_data_filtered)} qualifying loans")
return bank_data_filtered
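# Worked example (illustrative figures, assuming the calculators implement the standard
# debt/income and loan/home_value formulas): debt=1500 and income=4000 give a monthly
# debt-to-income ratio of 1500 / 4000 = 0.375; loan=200000 and home_value=250000 give a
# loan-to-value ratio of 200000 / 250000 = 0.80.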
def save_qualifying_loans(qualifying_loans):
"""Saves the qualifying loans to a CSV file.
Args:
qualifying_loans (list of lists): The qualifying bank loans.
"""
    # Ask whether to save the qualifying loans and, if confirmed, write them to CSV.
    choice = questionary.confirm("Would you like to save the qualifying loans?").ask()
    if choice:
        filepath = questionary.text("Please enter the file path").ask()
        save_csv(qualifying_loans, filepath)
def run():
"""The main function for running the script."""
# Load the latest Bank data
bank_data = load_bank_data()
# Get the applicant's information
credit_score, debt, income, loan_amount, home_value = get_applicant_info()
# Find qualifying loans
qualifying_loans = find_qualifying_loans(
bank_data, credit_score, debt, income, loan_amount, home_value
)
# Save qualifying loans
save_qualifying_loans(qualifying_loans)
if __name__ == "__main__":
fire.Fire(run)
| [
[
[
183,
184
],
[
3839,
3840
]
],
[
[
192,
195
],
[
1037,
1040
]
],
[
[
203,
207
],
[
4469,
4473
]
],
[
[
215,
226
],
[
904,
915
],
[
1310,
1321
],
[
1373,
1384
],
[
1456,
1467
],
[
1534,
1545
],
[
1610,
1621
],
[
3745,
3756
],
[
3863,
3874
]
],
[
[
247,
251
],
[
986,
990
]
],
[
[
259,
262
]
],
[
[
306,
314
],
[
1100,
1108
]
],
[
[
320,
328
],
[
3925,
3933
]
],
[
[
380,
408
],
[
2712,
2740
]
],
[
[
414,
443
],
[
2894,
2923
]
],
[
[
492,
512
],
[
3069,
3089
]
],
[
[
556,
575
],
[
3132,
3151
]
],
[
[
621,
642
],
[
3211,
3232
]
],
[
[
687,
707
],
[
3298,
3318
]
],
[
[
713,
727
],
[
4076,
4090
]
],
[
[
1124,
1142
],
[
4190,
4208
]
],
[
[
1890,
1911
],
[
4263,
4284
]
],
[
[
3461,
3482
],
[
4396,
4417
]
],
[
[
3968,
3971
],
[
4479,
4482
]
]
] |
from collections import defaultdict
from datetime import date, datetime, timedelta
from typing import Dict, List, Set, Tuple
from functools import lru_cache
from copy import copy
import traceback
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from pandas import DataFrame
from vnpy.trader.constant import Direction, Offset, Interval, Status
from vnpy.trader.database import database_manager
from vnpy.trader.object import OrderData, TradeData, BarData
from vnpy.trader.utility import round_to, extract_vt_symbol
from .template import StrategyTemplate
INTERVAL_DELTA_MAP = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
class BacktestingEngine:
""""""
gateway_name = "BACKTESTING"
def __init__(self):
""""""
self.vt_symbols: List[str] = []
self.start: datetime = None
self.end: datetime = None
        self.rates: Dict[str, float] = {}
        self.slippages: Dict[str, float] = {}
        self.sizes: Dict[str, float] = {}
        self.priceticks: Dict[str, float] = {}
self.capital: float = 1_000_000
self.risk_free: float = 0.02
self.strategy: StrategyTemplate = None
self.bars: Dict[str, BarData] = {}
self.datetime: datetime = None
self.interval: Interval = None
self.days: int = 0
self.history_data: Dict[Tuple, BarData] = {}
self.dts: Set[datetime] = set()
self.limit_order_count = 0
self.limit_orders = {}
self.active_limit_orders = {}
self.trade_count = 0
self.trades = {}
self.logs = []
self.daily_results = {}
self.daily_df = None
def clear_data(self) -> None:
"""
Clear all data of last backtesting.
"""
self.strategy = None
self.bars = {}
self.datetime = None
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
self.daily_df = None
def set_parameters(
self,
vt_symbols: List[str],
interval: Interval,
start: datetime,
rates: Dict[str, float],
slippages: Dict[str, float],
sizes: Dict[str, float],
priceticks: Dict[str, float],
capital: int = 0,
end: datetime = None,
risk_free: float = 0
) -> None:
""""""
self.vt_symbols = vt_symbols
self.interval = interval
self.rates = rates
self.slippages = slippages
self.sizes = sizes
self.priceticks = priceticks
self.start = start
self.end = end
self.capital = capital
self.risk_free = risk_free
def add_strategy(self, strategy_class: type, setting: dict) -> None:
""""""
self.strategy = strategy_class(
self, strategy_class.__name__, copy(self.vt_symbols), setting
)
def load_data(self) -> None:
""""""
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
if self.start >= self.end:
self.output("起始日期必须小于结束日期")
return
# Clear previously loaded history data
self.history_data.clear()
self.dts.clear()
# Load 30 days of data each time and allow for progress update
progress_delta = timedelta(days=30)
total_delta = self.end - self.start
interval_delta = INTERVAL_DELTA_MAP[self.interval]
for vt_symbol in self.vt_symbols:
start = self.start
end = self.start + progress_delta
progress = 0
data_count = 0
while start < self.end:
end = min(end, self.end) # Make sure end time stays within set range
data = load_bar_data(
vt_symbol,
self.interval,
start,
end
)
for bar in data:
self.dts.add(bar.datetime)
self.history_data[(bar.datetime, vt_symbol)] = bar
data_count += 1
progress += progress_delta / total_delta
progress = min(progress, 1)
progress_bar = "#" * int(progress * 10)
self.output(f"{vt_symbol}加载进度:{progress_bar} [{progress:.0%}]")
start = end + interval_delta
end += (progress_delta + interval_delta)
self.output(f"{vt_symbol}历史数据加载完成,数据量:{data_count}")
self.output("所有历史数据加载完成")
def run_backtesting(self) -> None:
""""""
self.strategy.on_init()
# Generate sorted datetime list
dts = list(self.dts)
dts.sort()
# Use the first [days] of history data for initializing strategy
day_count = 0
ix = 0
for ix, dt in enumerate(dts):
if self.datetime and dt.day != self.datetime.day:
day_count += 1
if day_count >= self.days:
break
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.strategy.inited = True
self.output("策略初始化完成")
self.strategy.on_start()
self.strategy.trading = True
self.output("开始回放历史数据")
# Use the rest of history data for running backtesting
for dt in dts[ix:]:
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.output("历史数据回放结束")
    def calculate_result(self) -> DataFrame:
""""""
self.output("开始计算逐日盯市盈亏")
if not self.trades:
self.output("成交记录为空,无法计算")
return
# Add trade data into daily reuslt.
for trade in self.trades.values():
d = trade.datetime.date()
daily_result = self.daily_results[d]
daily_result.add_trade(trade)
# Calculate daily result by iteration.
pre_closes = {}
start_poses = {}
for daily_result in self.daily_results.values():
daily_result.calculate_pnl(
pre_closes,
start_poses,
self.sizes,
self.rates,
self.slippages,
)
pre_closes = daily_result.close_prices
start_poses = daily_result.end_poses
# Generate dataframe
results = defaultdict(list)
for daily_result in self.daily_results.values():
fields = [
"date", "trade_count", "turnover",
"commission", "slippage", "trading_pnl",
"holding_pnl", "total_pnl", "net_pnl"
]
for key in fields:
value = getattr(daily_result, key)
results[key].append(value)
self.daily_df = DataFrame.from_dict(results).set_index("date")
self.output("逐日盯市盈亏计算完成")
return self.daily_df
    def calculate_statistics(self, df: DataFrame = None, output=True) -> dict:
""""""
self.output("开始计算策略统计指标")
# Check DataFrame input exterior
if df is None:
df = self.daily_df
# Check for init DataFrame
if df is None:
# Set all statistics to 0 if no trade.
start_date = ""
end_date = ""
total_days = 0
profit_days = 0
loss_days = 0
end_balance = 0
max_drawdown = 0
max_ddpercent = 0
max_drawdown_duration = 0
total_net_pnl = 0
daily_net_pnl = 0
total_commission = 0
daily_commission = 0
total_slippage = 0
daily_slippage = 0
total_turnover = 0
daily_turnover = 0
total_trade_count = 0
daily_trade_count = 0
total_return = 0
annual_return = 0
daily_return = 0
return_std = 0
sharpe_ratio = 0
return_drawdown_ratio = 0
else:
# Calculate balance related time series data
df["balance"] = df["net_pnl"].cumsum() + self.capital
df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
df["highlevel"] = (
df["balance"].rolling(
min_periods=1, window=len(df), center=False).max()
)
df["drawdown"] = df["balance"] - df["highlevel"]
df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days = len(df)
profit_days = len(df[df["net_pnl"] > 0])
loss_days = len(df[df["net_pnl"] < 0])
end_balance = df["balance"].iloc[-1]
max_drawdown = df["drawdown"].min()
max_ddpercent = df["ddpercent"].min()
max_drawdown_end = df["drawdown"].idxmin()
if isinstance(max_drawdown_end, date):
max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
else:
max_drawdown_duration = 0
total_net_pnl = df["net_pnl"].sum()
daily_net_pnl = total_net_pnl / total_days
total_commission = df["commission"].sum()
daily_commission = total_commission / total_days
total_slippage = df["slippage"].sum()
daily_slippage = total_slippage / total_days
total_turnover = df["turnover"].sum()
daily_turnover = total_turnover / total_days
total_trade_count = df["trade_count"].sum()
daily_trade_count = total_trade_count / total_days
total_return = (end_balance / self.capital - 1) * 100
annual_return = total_return / total_days * 240
daily_return = df["return"].mean() * 100
return_std = df["return"].std() * 100
if return_std:
daily_risk_free = self.risk_free / np.sqrt(240)
sharpe_ratio = (daily_return - daily_risk_free) / return_std * np.sqrt(240)
else:
sharpe_ratio = 0
return_drawdown_ratio = -total_net_pnl / max_drawdown
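            # Note on the formulas above: annualization assumes 240 trading days per year;
            # annual_return scales the per-day return by 240, and the Sharpe ratio compares
            # daily returns against a daily risk-free rate taken as risk_free / sqrt(240)
            # before scaling back by sqrt(240).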
# Output
if output:
self.output("-" * 30)
self.output(f"首个交易日:\t{start_date}")
self.output(f"最后交易日:\t{end_date}")
self.output(f"总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days}")
self.output(f"亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.2f}")
self.output(f"结束资金:\t{end_balance:,.2f}")
self.output(f"总收益率:\t{total_return:,.2f}%")
self.output(f"年化收益:\t{annual_return:,.2f}%")
self.output(f"最大回撤: \t{max_drawdown:,.2f}")
self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
self.output(f"最长回撤天数: \t{max_drawdown_duration}")
self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
self.output(f"总手续费:\t{total_commission:,.2f}")
self.output(f"总滑点:\t{total_slippage:,.2f}")
self.output(f"总成交金额:\t{total_turnover:,.2f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
self.output(f"日均手续费:\t{daily_commission:,.2f}")
self.output(f"日均滑点:\t{daily_slippage:,.2f}")
self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
self.output(f"日均成交笔数:\t{daily_trade_count}")
self.output(f"日均收益率:\t{daily_return:,.2f}%")
self.output(f"收益标准差:\t{return_std:,.2f}%")
self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
statistics = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"capital": self.capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"max_drawdown_duration": max_drawdown_duration,
"total_net_pnl": total_net_pnl,
"daily_net_pnl": daily_net_pnl,
"total_commission": total_commission,
"daily_commission": daily_commission,
"total_slippage": total_slippage,
"daily_slippage": daily_slippage,
"total_turnover": total_turnover,
"daily_turnover": daily_turnover,
"total_trade_count": total_trade_count,
"daily_trade_count": daily_trade_count,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": daily_return,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
"return_drawdown_ratio": return_drawdown_ratio,
}
# Filter potential error infinite value
for key, value in statistics.items():
if value in (np.inf, -np.inf):
value = 0
statistics[key] = np.nan_to_num(value)
self.output("策略统计指标计算完成")
return statistics
def show_chart(self, df: DataFrame = None) -> None:
""""""
# Check DataFrame input exterior
if df is None:
df = self.daily_df
# Check for init DataFrame
if df is None:
return
fig = make_subplots(
rows=4,
cols=1,
subplot_titles=["Balance", "Drawdown", "Daily Pnl", "Pnl Distribution"],
vertical_spacing=0.06
)
balance_line = go.Scatter(
x=df.index,
y=df["balance"],
mode="lines",
name="Balance"
)
drawdown_scatter = go.Scatter(
x=df.index,
y=df["drawdown"],
fillcolor="red",
fill='tozeroy',
mode="lines",
name="Drawdown"
)
pnl_bar = go.Bar(y=df["net_pnl"], name="Daily Pnl")
pnl_histogram = go.Histogram(x=df["net_pnl"], nbinsx=100, name="Days")
fig.add_trace(balance_line, row=1, col=1)
fig.add_trace(drawdown_scatter, row=2, col=1)
fig.add_trace(pnl_bar, row=3, col=1)
fig.add_trace(pnl_histogram, row=4, col=1)
fig.update_layout(height=1000, width=1000)
fig.show()
def update_daily_close(self, bars: Dict[str, BarData], dt: datetime) -> None:
""""""
d = dt.date()
close_prices = {}
for bar in bars.values():
close_prices[bar.vt_symbol] = bar.close_price
daily_result = self.daily_results.get(d, None)
if daily_result:
daily_result.update_close_prices(close_prices)
else:
self.daily_results[d] = PortfolioDailyResult(d, close_prices)
def new_bars(self, dt: datetime) -> None:
""""""
self.datetime = dt
bars: Dict[str, BarData] = {}
for vt_symbol in self.vt_symbols:
bar = self.history_data.get((dt, vt_symbol), None)
# If bar data of vt_symbol at dt exists
if bar:
# Update bar data for crossing order
self.bars[vt_symbol] = bar
# Put bar into dict for strategy.on_bars update
bars[vt_symbol] = bar
# Otherwise, use previous close to backfill
elif vt_symbol in self.bars:
old_bar = self.bars[vt_symbol]
bar = BarData(
symbol=old_bar.symbol,
exchange=old_bar.exchange,
datetime=dt,
open_price=old_bar.close_price,
high_price=old_bar.close_price,
low_price=old_bar.close_price,
close_price=old_bar.close_price,
gateway_name=old_bar.gateway_name
)
self.bars[vt_symbol] = bar
self.cross_limit_order()
self.strategy.on_bars(bars)
self.update_daily_close(self.bars, dt)
def cross_limit_order(self) -> None:
"""
Cross limit order with last bar/tick data.
"""
for order in list(self.active_limit_orders.values()):
bar = self.bars[order.vt_symbol]
long_cross_price = bar.low_price
short_cross_price = bar.high_price
long_best_price = bar.open_price
short_best_price = bar.open_price
# Push order update with status "not traded" (pending).
if order.status == Status.SUBMITTING:
order.status = Status.NOTTRADED
self.strategy.update_order(order)
# Check whether limit orders can be filled.
long_cross = (
order.direction == Direction.LONG
and order.price >= long_cross_price
and long_cross_price > 0
)
short_cross = (
order.direction == Direction.SHORT
and order.price <= short_cross_price
and short_cross_price > 0
)
if not long_cross and not short_cross:
continue
# Push order update with status "all traded" (filled).
order.traded = order.volume
order.status = Status.ALLTRADED
self.strategy.update_order(order)
self.active_limit_orders.pop(order.vt_orderid)
# Push trade update
self.trade_count += 1
if long_cross:
trade_price = min(order.price, long_best_price)
else:
trade_price = max(order.price, short_best_price)
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.strategy.update_trade(trade)
self.trades[trade.vt_tradeid] = trade
def load_bars(
self,
strategy: StrategyTemplate,
days: int,
interval: Interval
) -> None:
""""""
self.days = days
def send_order(
self,
strategy: StrategyTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool,
net: bool
) -> List[str]:
""""""
price = round_to(price, self.priceticks[vt_symbol])
symbol, exchange = extract_vt_symbol(vt_symbol)
self.limit_order_count += 1
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=str(self.limit_order_count),
direction=direction,
offset=offset,
price=price,
volume=volume,
status=Status.SUBMITTING,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.active_limit_orders[order.vt_orderid] = order
self.limit_orders[order.vt_orderid] = order
return [order.vt_orderid]
def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str) -> None:
"""
Cancel order by vt_orderid.
"""
if vt_orderid not in self.active_limit_orders:
return
order = self.active_limit_orders.pop(vt_orderid)
order.status = Status.CANCELLED
self.strategy.update_order(order)
def write_log(self, msg: str, strategy: StrategyTemplate = None) -> None:
"""
Write log message.
"""
msg = f"{self.datetime}\t{msg}"
self.logs.append(msg)
def send_email(self, msg: str, strategy: StrategyTemplate = None) -> None:
"""
Send email to default receiver.
"""
pass
def sync_strategy_data(self, strategy: StrategyTemplate) -> None:
"""
Sync strategy data into json file.
"""
pass
def put_strategy_event(self, strategy: StrategyTemplate) -> None:
"""
Put an event to update strategy status.
"""
pass
def output(self, msg) -> None:
"""
Output message of backtesting engine.
"""
print(f"{datetime.now()}\t{msg}")
def get_all_trades(self) -> List[TradeData]:
"""
Return all trade data of current backtesting result.
"""
return list(self.trades.values())
def get_all_orders(self) -> List[OrderData]:
"""
Return all limit order data of current backtesting result.
"""
return list(self.limit_orders.values())
def get_all_daily_results(self) -> List["PortfolioDailyResult"]:
"""
Return all daily result data.
"""
return list(self.daily_results.values())
class ContractDailyResult:
""""""
def __init__(self, result_date: date, close_price: float):
""""""
self.date: date = result_date
self.close_price: float = close_price
self.pre_close: float = 0
self.trades: List[TradeData] = []
self.trade_count: int = 0
self.start_pos: float = 0
self.end_pos: float = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
""""""
self.trades.append(trade)
def calculate_pnl(
self,
pre_close: float,
start_pos: float,
size: int,
rate: float,
slippage: float
) -> None:
""""""
# If no pre_close provided on the first day,
# use value 1 to avoid zero division error
if pre_close:
self.pre_close = pre_close
else:
self.pre_close = 1
# Holding pnl is the pnl from holding position at day start
self.start_pos = start_pos
self.end_pos = start_pos
self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size
# Trading pnl is the pnl from new trade during the day
self.trade_count = len(self.trades)
for trade in self.trades:
if trade.direction == Direction.LONG:
pos_change = trade.volume
else:
pos_change = -trade.volume
self.end_pos += pos_change
turnover = trade.volume * size * trade.price
self.trading_pnl += pos_change * (self.close_price - trade.price) * size
self.slippage += trade.volume * size * slippage
self.turnover += turnover
self.commission += turnover * rate
# Net pnl takes account of commission and slippage cost
self.total_pnl = self.trading_pnl + self.holding_pnl
self.net_pnl = self.total_pnl - self.commission - self.slippage
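        # Worked example (illustrative numbers): start_pos=2, pre_close=100,
        # close_price=105, size=10 gives holding_pnl = 2 * (105 - 100) * 10 = 100;
        # a single long trade of volume 1 at price 103 then adds
        # trading_pnl = 1 * (105 - 103) * 10 = 20 before commission and slippage.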
def update_close_price(self, close_price: float) -> None:
""""""
self.close_price = close_price
class PortfolioDailyResult:
""""""
def __init__(self, result_date: date, close_prices: Dict[str, float]):
""""""
self.date: date = result_date
self.close_prices: Dict[str, float] = close_prices
self.pre_closes: Dict[str, float] = {}
self.start_poses: Dict[str, float] = {}
self.end_poses: Dict[str, float] = {}
self.contract_results: Dict[str, ContractDailyResult] = {}
for vt_symbol, close_price in close_prices.items():
self.contract_results[vt_symbol] = ContractDailyResult(result_date, close_price)
self.trade_count: int = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
""""""
contract_result = self.contract_results[trade.vt_symbol]
contract_result.add_trade(trade)
def calculate_pnl(
self,
pre_closes: Dict[str, float],
start_poses: Dict[str, float],
sizes: Dict[str, float],
rates: Dict[str, float],
slippages: Dict[str, float],
) -> None:
""""""
self.pre_closes = pre_closes
for vt_symbol, contract_result in self.contract_results.items():
contract_result.calculate_pnl(
pre_closes.get(vt_symbol, 0),
start_poses.get(vt_symbol, 0),
sizes[vt_symbol],
rates[vt_symbol],
slippages[vt_symbol]
)
self.trade_count += contract_result.trade_count
self.turnover += contract_result.turnover
self.commission += contract_result.commission
self.slippage += contract_result.slippage
self.trading_pnl += contract_result.trading_pnl
self.holding_pnl += contract_result.holding_pnl
self.total_pnl += contract_result.total_pnl
self.net_pnl += contract_result.net_pnl
self.end_poses[vt_symbol] = contract_result.end_pos
def update_close_prices(self, close_prices: Dict[str, float]) -> None:
""""""
self.close_prices = close_prices
for vt_symbol, close_price in close_prices.items():
contract_result = self.contract_results.get(vt_symbol, None)
if contract_result:
contract_result.update_close_price(close_price)
@lru_cache(maxsize=999)
def load_bar_data(
vt_symbol: str,
interval: Interval,
start: datetime,
end: datetime
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
return database_manager.load_bar_data(
symbol, exchange, interval, start, end
)
| [
[
[
24,
35
],
[
6851,
6862
]
],
[
[
57,
61
],
[
9490,
9494
],
[
21821,
21825
],
[
21882,
21886
],
[
24104,
24108
],
[
24177,
24181
]
],
[
[
63,
71
],
[
922,
930
],
[
956,
964
],
[
1338,
1346
],
[
1496,
1504
],
[
2317,
2325
],
[
2507,
2515
],
[
3240,
3248
],
[
15088,
15096
],
[
15520,
15528
],
[
21172,
21180
],
[
26661,
26669
],
[
26680,
26688
]
],
[
[
73,
82
],
[
648,
657
],
[
689,
698
],
[
729,
738
],
[
3554,
3563
]
],
[
[
102,
106
],
[
993,
997
],
[
1038,
1042
],
[
1079,
1083
],
[
1125,
1129
],
[
1291,
1295
],
[
1448,
1452
],
[
2342,
2346
],
[
2379,
2383
],
[
2412,
2416
],
[
2450,
2454
],
[
15064,
15068
],
[
15596,
15600
],
[
24124,
24128
],
[
24223,
24227
],
[
24280,
24284
],
[
24328,
24332
],
[
24374,
24378
],
[
24428,
24432
],
[
25123,
25127
],
[
25162,
25166
],
[
25195,
25199
],
[
25228,
25232
],
[
25265,
25269
],
[
26248,
26252
]
],
[
[
108,
112
],
[
887,
891
],
[
2263,
2267
],
[
19328,
19332
],
[
21230,
21234
],
[
21407,
21411
],
[
21603,
21607
],
[
22003,
22007
]
],
[
[
114,
117
],
[
1492,
1495
]
],
[
[
119,
124
],
[
1453,
1458
]
],
[
[
147,
156
],
[
26564,
26573
]
],
[
[
174,
178
],
[
3069,
3073
]
],
[
[
186,
195
],
[
5414,
5423
],
[
5874,
5883
]
],
[
[
204,
215
],
[
8646,
8648
],
[
10586,
10588
],
[
10678,
10680
],
[
13647,
13649
],
[
13656,
13658
],
[
13721,
13723
]
],
[
[
223,
249
],
[
14271,
14273
],
[
14426,
14428
],
[
14631,
14633
],
[
14697,
14699
]
],
[
[
278,
291
],
[
14063,
14076
]
],
[
[
311,
320
],
[
7276,
7285
],
[
7427,
7436
],
[
13833,
13842
]
],
[
[
355,
364
],
[
17481,
17490
],
[
17667,
17676
],
[
19201,
19210
],
[
23263,
23272
]
],
[
[
366,
372
],
[
19228,
19234
]
],
[
[
374,
382
],
[
631,
639
],
[
674,
682
],
[
713,
721
],
[
1378,
1386
],
[
2292,
2300
],
[
19023,
19031
],
[
26640,
26648
]
],
[
[
384,
390
],
[
17245,
17251
],
[
17295,
17301
],
[
18004,
18010
],
[
19773,
19779
],
[
20326,
20332
]
],
[
[
424,
440
],
[
26767,
26783
]
],
[
[
472,
481
],
[
19524,
19533
],
[
21412,
21421
]
],
[
[
483,
492
],
[
18390,
18399
],
[
21235,
21244
],
[
22008,
22017
],
[
22398,
22407
],
[
24924,
24933
]
],
[
[
494,
501
],
[
1301,
1308
],
[
1460,
1467
],
[
15074,
15081
],
[
15606,
15613
],
[
16164,
16171
]
],
[
[
534,
542
],
[
19370,
19378
]
],
[
[
544,
561
],
[
19441,
19458
],
[
26726,
26743
]
],
[
[
585,
601
],
[
1248,
1264
],
[
18968,
18984
],
[
19140,
19156
],
[
20067,
20083
],
[
20430,
20446
],
[
20631,
20647
],
[
20786,
20802
],
[
20937,
20953
]
],
[
[
604,
622
],
[
3642,
3660
]
],
[
[
758,
775
]
],
[
[
21752,
21771
],
[
24438,
24457
],
[
24572,
24591
]
],
[
[
24034,
24054
],
[
15454,
15474
]
],
[
[
26591,
26604
],
[
3995,
4008
]
]
] |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
import pytest
from pants.backend.codegen.export_codegen_goal import ExportCodegen
from pants.backend.codegen.export_codegen_goal import rules as write_codegen_rules
from pants.core.target_types import FilesSources, ResourcesSources
from pants.core.util_rules import distdir
from pants.engine.fs import CreateDigest, FileContent, Snapshot
from pants.engine.rules import Get, rule
from pants.engine.target import GeneratedSources, GenerateSourcesRequest, Sources, Target
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import RuleRunner
class Gen1Sources(Sources):
pass
class Gen2Sources(Sources):
pass
class Gen1Target(Target):
alias = "gen1"
core_fields = (Gen1Sources,)
class Gen2Target(Target):
alias = "gen2"
core_fields = (Gen2Sources,)
class Gen1Request(GenerateSourcesRequest):
input = Gen1Sources
output = FilesSources
class Gen2Request(GenerateSourcesRequest):
input = Gen2Sources
output = ResourcesSources
@rule
async def gen1(_: Gen1Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("assets/README.md", b"Hello!")]))
return GeneratedSources(result)
@rule
async def gen2(_: Gen2Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("src/haskell/app.hs", b"10 * 4")]))
return GeneratedSources(result)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*write_codegen_rules(),
gen1,
gen2,
UnionRule(GenerateSourcesRequest, Gen1Request),
UnionRule(GenerateSourcesRequest, Gen2Request),
*distdir.rules(),
],
target_types=[Gen1Target, Gen2Target],
)
def test_no_codegen_targets(rule_runner: RuleRunner, caplog) -> None:
result = rule_runner.run_goal_rule(ExportCodegen)
assert result.exit_code == 0
assert len(caplog.records) == 1
assert "No codegen files/targets matched. All codegen target types: gen1, gen2" in caplog.text
def test_export_codegen(rule_runner: RuleRunner) -> None:
rule_runner.add_to_build_file("", "gen1(name='gen1')\ngen2(name='gen2')\n")
result = rule_runner.run_goal_rule(ExportCodegen, args=["::"])
assert result.exit_code == 0
parent_dir = Path(rule_runner.build_root, "dist", "codegen")
assert (parent_dir / "assets" / "README.md").read_text() == "Hello!"
assert (parent_dir / "src" / "haskell" / "app.hs").read_text() == "10 * 4"
| [
[
[
152,
156
],
[
2459,
2463
]
],
[
[
165,
171
],
[
1535,
1541
]
],
[
[
227,
240
],
[
2019,
2032
],
[
2381,
2394
]
],
[
[
295,
323
],
[
1635,
1654
]
],
[
[
360,
372
],
[
1040,
1052
]
],
[
[
374,
390
],
[
1135,
1151
]
],
[
[
425,
432
],
[
1827,
1834
]
],
[
[
461,
473
],
[
1245,
1257
],
[
1434,
1446
]
],
[
[
475,
486
],
[
1259,
1270
],
[
1448,
1459
]
],
[
[
488,
496
],
[
1235,
1243
],
[
1424,
1432
]
],
[
[
528,
531
],
[
1231,
1234
],
[
1420,
1423
]
],
[
[
533,
537
],
[
1155,
1159
],
[
1344,
1348
]
],
[
[
570,
586
],
[
1194,
1210
],
[
1316,
1332
],
[
1383,
1399
],
[
1507,
1523
]
],
[
[
588,
610
],
[
978,
1000
],
[
1073,
1095
],
[
1716,
1738
],
[
1776,
1798
]
],
[
[
612,
619
],
[
740,
747
],
[
779,
786
]
],
[
[
621,
627
],
[
817,
823
],
[
897,
903
]
],
[
[
660,
669
],
[
1706,
1715
],
[
1766,
1775
]
],
[
[
709,
719
],
[
1571,
1581
],
[
1594,
1604
],
[
1951,
1961
],
[
2241,
2251
]
],
[
[
728,
739
],
[
864,
875
],
[
1015,
1026
]
],
[
[
767,
778
],
[
944,
955
],
[
1110,
1121
]
],
[
[
806,
816
],
[
1877,
1887
]
],
[
[
886,
896
],
[
1889,
1899
]
],
[
[
966,
977
],
[
1178,
1189
],
[
1740,
1751
]
],
[
[
1061,
1072
],
[
1367,
1378
],
[
1800,
1811
]
],
[
[
1160,
1340
],
[
1670,
1674
]
],
[
[
1349,
1531
],
[
1688,
1692
]
],
[
[
1554,
1565
]
],
[
[
1914,
1937
]
],
[
[
2208,
2227
]
]
] |
from collections import defaultdict
from operator import itemgetter
# python -m movies_recommender.RecommenderSVD
from movies_analyzer.Movies import Movies
from movies_analyzer.RecommendationDataset import RecommendationDataSet
from movies_recommender.Recommender import Recommender
from surprise import SVD, KNNBasic
from movies_recommender.utils import get_top_n
class RecommenderSVD(Recommender):
def __init__(self, recommendation_dataset: RecommendationDataSet):
super(RecommenderSVD, self).__init__(recommendation_dataset.movies)
self.algorithm = SVD()
self.recommendation_dataset = recommendation_dataset
def fit(self, dataset):
return self.algorithm.fit(dataset)
def test(self, test_set):
return self.algorithm.test(test_set)
def get_recommendation(self, watched, k=20):
# get dataset
new_user_id, full_dataset = self.recommendation_dataset.get_dataset_with_extended_user(watched)
inner_user_id = full_dataset.to_inner_uid(new_user_id)
        # After extending the dataset with the new user, retrain the model on the
        # full dataset so that predictions can be made for that user.
self.algorithm.fit(full_dataset)
# watched movies
watched = {full_dataset.to_inner_iid(key): value for key,value in watched.items()}
# Calculate for all similar user, predictions
test_items = [
self.algorithm.predict(new_user_id, full_dataset.to_raw_iid(i))
for i in range(0, full_dataset.n_items)
if i not in watched
]
topn_items = [i[0] for i in get_top_n(test_items, n=k, minimum_rating=1.0)[new_user_id]]
return self.movies.get_movie_by_movie_ids(topn_items)
if __name__ == '__main__':
from movies_recommender.Recommender import test_recommendation
from movies_recommender.RecommenderSVD import RecommenderSVD
from movies_analyzer.RecommendationDataset import RecommendationDataSet
from movies_analyzer.Movies import Movies
movies = Movies()
recommendation_dataset = RecommendationDataSet(movies=movies)
recommender = RecommenderSVD(recommendation_dataset)
assert recommender.__module__[:len('movies_recommender.')] == 'movies_recommender.'
test_recommendation(recommender, recommendation_dataset,
example_items=['arek','mateusz'], anti_test=True)
""" For test only
%load_ext autoreload
%autoreload 2
from filmweb_integrator.fwimdbmerge.filmweb import Filmweb
from filmweb_integrator.fwimdbmerge.merger import Merger, get_json_df
from movies_recommender.Recommender import get_moviescore_df, get_watched
recommender.fit(recommendation_dataset.full_dataset)
self = recommender
# get recommendation for one user
merger = Merger(filmweb=Filmweb(), imdb=movies.imdb)
watched = get_watched(get_moviescore_df(merger, recommender.movies,'arek'))
k = 20
k_inner_item = 20
self.get_recommendation(watched)
"""
| [
[
[
24,
35
]
],
[
[
57,
67
]
],
[
[
150,
156
]
],
[
[
207,
228
],
[
451,
472
]
],
[
[
272,
283
],
[
390,
401
]
],
[
[
305,
308
],
[
576,
579
]
],
[
[
310,
318
]
],
[
[
357,
366
],
[
1603,
1612
]
],
[
[
375,
389
],
[
489,
503
]
],
[
[
1802,
1821
],
[
2248,
2267
]
],
[
[
1872,
1886
],
[
2116,
2130
],
[
489,
503
]
],
[
[
1941,
1962
],
[
2061,
2082
],
[
451,
472
]
],
[
[
2002,
2008
],
[
2023,
2029
]
],
[
[
2014,
2020
],
[
2090,
2096
]
],
[
[
2036,
2058
],
[
2131,
2153
],
[
2281,
2303
]
],
[
[
2102,
2113
],
[
2167,
2178
],
[
2268,
2279
]
]
] |
"""
Copyright 2019 Glen Harmon
MNTNER Object Description
https://www.ripe.net/manage-ips-and-asns/db/support/documentation/ripe-database-documentation/rpsl-object-types/4-3-descriptions-of-secondary-objects/4-3-4-description-of-the-mntner-object
"""
from .rpsl import Rpsl
class Maintainer(Rpsl):
def __init__(self):
self.handle = None
self.description = list()
self.update_to = list()
self.maintainer_notify = list()
self.authentication = list()
super().__init__()
def html(self, heading_level=1):
return super().html(
title='Maintainer',
attributes=[
(None, self.handle),
('Description', self.description),
('Update To', self.update_to),
('Maintainer Notify', self.maintainer_notify),
('Authentication', self.authentication),
('Organisation', self.organisation),
('Admin Contact', self.admin_contact),
('Technical Contact', self.technical_contact),
('Remarks', self.remarks),
('Notify', self.notify),
('Maintained By', self.maintained_by),
('Modified', self.modified),
('Type', self.type_),
]
)
| [
[
[
271,
275
],
[
295,
299
]
],
[
[
284,
294
]
]
] |
from django.db.models import query
from .query import SafeDeleteQuery
from functools import partial, reduce
from django.db.models.constants import LOOKUP_SEP
from django.db.models import Max, Min, F
from django.utils.module_loading import import_string
def get_lookup_value(obj, field):
return reduce(lambda i, f: getattr(i, f), field.split(LOOKUP_SEP), obj)
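# Example (illustrative objects): get_lookup_value(book, "author__name") follows the
# Django-style lookup path and is equivalent to book.author.name.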
class SafeDeleteQueryset(query.QuerySet):
"""Default queryset for the SafeDeleteManager.
Takes care of "lazily evaluating" safedelete QuerySets. QuerySets passed
within the ``SafeDeleteQueryset`` will have all of the models available.
The deleted policy is evaluated at the very end of the chain when the
QuerySet itself is evaluated.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
super(SafeDeleteQueryset, self).__init__(model=model, query=query, using=using, hints=hints)
self.query = query or SafeDeleteQuery(self.model)
def delete(self, force_policy=None):
"""Overrides bulk delete behaviour.
.. note::
The current implementation loses performance on bulk deletes in order
to safely delete objects according to the deletion policies set.
.. seealso::
:py:func:`safedelete.models.SafeDeleteModel.delete`
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
# TODO: Replace this by bulk update if we can
for obj in self.all():
obj.delete(force_policy=force_policy)
self._result_cache = None
delete.alters_data = True
def undelete(self, force_policy=None):
"""Undelete all soft deleted models.
.. note::
The current implementation loses performance on bulk undeletes in
order to call the pre/post-save signals.
.. seealso::
:py:func:`safedelete.models.SafeDeleteModel.undelete`
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with undelete."
# TODO: Replace this by bulk update if we can (need to call pre/post-save signal)
for obj in self.all():
obj.undelete(force_policy=force_policy)
self._result_cache = None
undelete.alters_data = True
def all(self, force_visibility=None):
"""Override so related managers can also see the deleted models.
A model's m2m field does not easily have access to `all_objects` and
so setting `force_visibility` to True is a way of getting all of the
models. It is not recommended to use `force_visibility` outside of related
models because it will create a new queryset.
Args:
force_visibility: Force a deletion visibility. (default: {None})
"""
if force_visibility is not None:
self.query._safedelete_force_visibility = force_visibility
return super(SafeDeleteQueryset, self).all()
def filter(self, *args, **kwargs):
# Return a copy, see #131
queryset = self._clone()
queryset.query.check_field_filter(**kwargs)
return super(SafeDeleteQueryset, queryset).filter(*args, **kwargs)
class OrderedSafeDeleteQueryset(SafeDeleteQueryset):
"""
# ADDED BY LEE
This extends SafeDeleteQueryset with methods from OrderedModelQuerySet
of the django-ordered-model package, so that we can have both proper ordering and
safe-deletion
"""
def _get_order_field_name(self):
return self.model.order_field_name
def _get_order_field_lookup(self, lookup):
order_field_name = self._get_order_field_name()
return LOOKUP_SEP.join([order_field_name, lookup])
def _get_order_with_respect_to(self):
model = self.model
order_with_respect_to = model.order_with_respect_to
if isinstance(order_with_respect_to, str):
order_with_respect_to = (order_with_respect_to,)
if order_with_respect_to is None:
raise AssertionError(
(
'ordered model admin "{0}" has not specified "order_with_respect_to"; note that this '
"should go in the model body, and is not to be confused with the Meta property of the same name, "
"which is independent Django functionality"
).format(model)
)
return order_with_respect_to
def get_max_order(self):
order_field_name = self._get_order_field_name()
return self.aggregate(Max(order_field_name)).get(
self._get_order_field_lookup("max")
)
def get_min_order(self):
order_field_name = self._get_order_field_name()
return self.aggregate(Min(order_field_name)).get(
self._get_order_field_lookup("min")
)
def get_next_order(self):
order = self.get_max_order()
return order + 1 if order is not None else 0
def above(self, order, inclusive=False):
"""Filter items above order."""
lookup = "gte" if inclusive else "gt"
return self.filter(**{self._get_order_field_lookup(lookup): order})
def above_instance(self, ref, inclusive=False):
"""Filter items above ref's order."""
order_field_name = self._get_order_field_name()
order = getattr(ref, order_field_name)
return self.above(order, inclusive=inclusive)
def below(self, order, inclusive=False):
"""Filter items below order."""
lookup = "lte" if inclusive else "lt"
return self.filter(**{self._get_order_field_lookup(lookup): order})
def below_instance(self, ref, inclusive=False):
"""Filter items below ref's order."""
order_field_name = self._get_order_field_name()
order = getattr(ref, order_field_name)
return self.below(order, inclusive=inclusive)
def decrease_order(self, **extra_kwargs):
"""Decrease `order_field_name` value by 1."""
order_field_name = self._get_order_field_name()
update_kwargs = {order_field_name: F(order_field_name) - 1}
if extra_kwargs:
update_kwargs.update(extra_kwargs)
return self.update(**update_kwargs)
def increase_order(self, **extra_kwargs):
"""Increase `order_field_name` value by 1."""
order_field_name = self._get_order_field_name()
update_kwargs = {order_field_name: F(order_field_name) + 1}
if extra_kwargs:
update_kwargs.update(extra_kwargs)
return self.update(**update_kwargs)
def bulk_create(self, objs, batch_size=None):
order_field_name = self._get_order_field_name()
order_with_respect_to = self.model.order_with_respect_to
objs = list(objs)
if order_with_respect_to:
order_with_respect_to_mapping = {}
order_with_respect_to = self._get_order_with_respect_to()
for obj in objs:
key = tuple(
get_lookup_value(obj, field) for field in order_with_respect_to
)
if key in order_with_respect_to_mapping:
order_with_respect_to_mapping[key] += 1
else:
order_with_respect_to_mapping[
key
] = self.filter_by_order_with_respect_to(obj).get_next_order()
setattr(obj, order_field_name, order_with_respect_to_mapping[key])
else:
for order, obj in enumerate(objs, self.get_next_order()):
setattr(obj, order_field_name, order)
return super().bulk_create(objs, batch_size=batch_size)
def _get_order_with_respect_to_filter_kwargs(self, ref):
order_with_respect_to = self._get_order_with_respect_to()
_get_lookup_value = partial(get_lookup_value, ref)
return {field: _get_lookup_value(field) for field in order_with_respect_to}
_get_order_with_respect_to_filter_kwargs.queryset_only = False
def filter_by_order_with_respect_to(self, ref):
order_with_respect_to = self.model.order_with_respect_to
if order_with_respect_to:
filter_kwargs = self._get_order_with_respect_to_filter_kwargs(ref)
return self.filter(**filter_kwargs)
return self
| [
[
[
29,
34
],
[
393,
398
]
],
[
[
55,
70
],
[
936,
951
]
],
[
[
93,
100
],
[
7796,
7803
]
],
[
[
102,
108
],
[
301,
307
]
],
[
[
148,
158
],
[
348,
358
],
[
3657,
3667
]
],
[
[
188,
191
],
[
4526,
4529
]
],
[
[
193,
196
],
[
4728,
4731
]
],
[
[
198,
199
],
[
6063,
6064
],
[
6404,
6405
]
],
[
[
240,
253
]
],
[
[
260,
276
],
[
6972,
6988
],
[
7804,
7820
]
],
[
[
374,
392
],
[
3222,
3240
],
[
819,
837
],
[
2922,
2940
],
[
3134,
3152
]
],
[
[
3196,
3221
]
]
] |
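A rough usage sketch for the queryset above. Everything here is hypothetical wiring: the model name, fields, and manager attachment are assumptions, and a real project would combine this with django-safedelete's SafeDeleteModel and its managers rather than a plain Django model.
from django.db import models

class Task(models.Model):
    # Hypothetical model; a real setup would inherit from SafeDeleteModel so
    # that obj.delete()/obj.undelete() honour the deletion policies.
    title = models.CharField(max_length=100)
    order = models.PositiveIntegerField(default=0)
    project_id = models.IntegerField()

    # Class attributes the ordering helpers above expect on the model.
    order_field_name = "order"
    order_with_respect_to = "project_id"

    objects = OrderedSafeDeleteQueryset.as_manager()

# e.g. shift everything below `task` down by one, then find the next free slot:
# Task.objects.below_instance(task).increase_order()
# next_slot = Task.objects.filter_by_order_with_respect_to(task).get_next_order()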
import numpy as np
from itertools import product
from markovGames.gameDefs.mdpDefs import Policy
def getAllDetPol(numStates, numActions):
detProbs = [np.array([1 if j == i else 0 for j in range(numActions)]) for i in range(numActions)]
return product(detProbs, repeat=numStates)
def getPolList(states, acSet):
# list of possible deterministic policies
numStates = len(states)
numActions = len(acSet)
detPol = getAllDetPol(numStates, numActions)
return [Policy(states, pol, acSet) for pol in detPol]
def prodPolList(states, listActions):
# get policies for each action Set
polList = [getPolList(states, ac) for ac in listActions]
return polList
def getPayoff(utilMap, listAcSet):
    # utilMap: maps a joint list of agent policies to a real number
    # listAcSet: list over agents of each agent's possible policies
def utilInd(index):
jointAc = [listAcSet[j][ind] for j, ind in enumerate(index)]
val = utilMap(jointAc)
return val
numPL = [len(pL) for pL in listAcSet]
payoff = np.zeros(numPL)
for ind in product(*[range(nI) for nI in numPL]):
payoff[ind] = utilInd(ind)
return payoff
def getArgOpt(tensor):
return np.unravel_index(np.argmax(tensor), tensor.shape)
def bruteFindNash(payoffList):
TOLERANCE = 1e-7
cpnes = list(np.argwhere(payoffList[0] > np.amax(payoffList[0], 0) - TOLERANCE))
cpnes = [tuple(cpne) for cpne in cpnes]
N = len(payoffList)
for i in range(1, N):
pMat = payoffList[i]
for cpne in cpnes[:]:
ind = cpne[:i] + (slice(None),) + cpne[i + 1:]
if pMat[cpne] < np.max(pMat[ind]) - TOLERANCE:
cpnes.pop(cpnes.index(cpne))
return cpnes
def getEfficiency(cpnes, welfareMat):
# welfareMat - matrix form of welfare
pneWelf = [welfareMat[cpne] for cpne in cpnes]
opt = np.max(welfareMat)
priceRatios = [float(pne) / opt for pne in pneWelf]
return priceRatios
def getPoA(cpnes, welfareMat):
return min(getEfficiency(cpnes, welfareMat))
| [
[
[
7,
18
],
[
156,
158
],
[
1051,
1053
],
[
1210,
1212
],
[
1227,
1229
],
[
1331,
1333
],
[
1359,
1361
],
[
1640,
1642
],
[
1876,
1878
]
],
[
[
41,
48
],
[
253,
260
],
[
1082,
1089
]
],
[
[
90,
96
],
[
485,
491
]
],
[
[
103,
115
],
[
437,
449
]
],
[
[
295,
305
],
[
625,
635
]
],
[
[
537,
548
]
],
[
[
696,
705
]
],
[
[
1180,
1189
]
],
[
[
1266,
1279
]
],
[
[
1739,
1752
],
[
2022,
2035
]
],
[
[
1980,
1986
]
]
] |
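A small worked example of the helpers above, using made-up 2x2 payoff matrices (a prisoner's dilemma) instead of matrices built via getPayoff; bruteFindNash only needs a list of numpy arrays, one per player.
import numpy as np

# payoffList[i][a0, a1] is player i's payoff when player 0 plays a0 and
# player 1 plays a1; this is a standard prisoner's dilemma.
p0 = np.array([[3, 0],
               [5, 1]])
p1 = p0.T                            # symmetric game
cpnes = bruteFindNash([p0, p1])      # -> [(1, 1)], mutual defection
welfare = p0 + p1                    # utilitarian welfare matrix
print(getPoA(cpnes, welfare))        # 2 / 6 = 1/3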
#!/usr/bin/env python
# ----------------------------------------------------------------------- #
# Copyright 2017, Gregor von Laszewski, Indiana University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------#
from setuptools import find_packages, setup
import io
def readfile(filename):
with io.open(filename, encoding="utf-8") as stream:
return stream.read().split()
#requiers = readfile ('requirements.txt')
#
# add minimum requirements here
#
requiers = """
psutil
pygments
""".split("\n")
# dependency_links = ['http://github.com/nicolaiarocci/eve.git@develop']
version = readfile("VERSION")[0].strip()
with open('README.md') as f:
long_description = f.read()
NAME = "cloudmesh-john"
DESCRIPTION = "A command called john and foo for the cloudmesh shell"
AUTHOR = "Gregor von Laszewski"
AUTHOR_EMAIL = "laszewski@gmail.com"
URL = "https://github.com/cloudmesh/cloudmesh-john"
setup(
name=NAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
version=version,
license="Apache 2.0",
url=URL,
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
install_requires=requiers,
tests_require=[
"flake8",
"coverage",
],
zip_safe=False,
namespace_packages=['cloudmesh'],
)
| [
[
[
1186,
1199
],
[
2124,
2137
]
],
[
[
1201,
1206
],
[
1860,
1865
]
],
[
[
1214,
1216
],
[
1251,
1253
]
],
[
[
1222,
1230
],
[
1547,
1555
]
],
[
[
1415,
1423
],
[
2559,
2567
]
],
[
[
1537,
1544
],
[
2063,
2070
]
],
[
[
1605,
1606
],
[
1631,
1632
]
],
[
[
1612,
1628
],
[
1982,
1998
]
],
[
[
1643,
1647
],
[
1876,
1880
]
],
[
[
1667,
1678
],
[
1948,
1959
]
],
[
[
1737,
1743
],
[
1893,
1899
]
],
[
[
1769,
1781
],
[
1918,
1930
]
],
[
[
1806,
1809
],
[
2106,
2109
]
]
] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Discrete Wavelet Transformation for Characterization of Time-Series Data.
"""
import numpy as np
from utils import InputData, InputTypes, xmlUtils
from .TimeSeriesAnalyzer import TimeSeriesGenerator, TimeSeriesCharacterizer
# utility methods
class Wavelet(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
Perform Discrete Wavelet Transformation on time-dependent data.
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for class cls.
@ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.
"""
specs = super(Wavelet, cls).getInputSpecification()
specs.name = 'wavelet'
specs.description = r"""Discrete Wavelet TimeSeriesAnalysis algorithm. Performs a discrete wavelet transform
on time-dependent data. Note: This TSA module requires pywavelets to be installed within your
python environment."""
specs.addSub(InputData.parameterInputFactory(
'family',
contentType=InputTypes.StringType,
descr=r"""The type of wavelet to use for the transformation.
There are several possible families to choose from, and most families contain
more than one variation. For more information regarding the wavelet families,
refer to the Pywavelets documentation located at:
https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html (wavelet-families)
\\
Possible values are:
\begin{itemize}
\item \textbf{haar family}: haar
\item \textbf{db family}: db1, db2, db3, db4, db5, db6, db7, db8, db9, db10, db11,
db12, db13, db14, db15, db16, db17, db18, db19, db20, db21, db22, db23,
db24, db25, db26, db27, db28, db29, db30, db31, db32, db33, db34, db35,
db36, db37, db38
\item \textbf{sym family}: sym2, sym3, sym4, sym5, sym6, sym7, sym8, sym9, sym10,
sym11, sym12, sym13, sym14, sym15, sym16, sym17, sym18, sym19, sym20
\item \textbf{coif family}: coif1, coif2, coif3, coif4, coif5, coif6, coif7, coif8,
coif9, coif10, coif11, coif12, coif13, coif14, coif15, coif16, coif17
\item \textbf{bior family}: bior1.1, bior1.3, bior1.5, bior2.2, bior2.4, bior2.6,
bior2.8, bior3.1, bior3.3, bior3.5, bior3.7, bior3.9, bior4.4, bior5.5,
bior6.8
\item \textbf{rbio family}: rbio1.1, rbio1.3, rbio1.5, rbio2.2, rbio2.4, rbio2.6,
rbio2.8, rbio3.1, rbio3.3, rbio3.5, rbio3.7, rbio3.9, rbio4.4, rbio5.5,
rbio6.8
\item \textbf{dmey family}: dmey
\item \textbf{gaus family}: gaus1, gaus2, gaus3, gaus4, gaus5, gaus6, gaus7, gaus8
\item \textbf{mexh family}: mexh
\item \textbf{morl family}: morl
\item \textbf{cgau family}: cgau1, cgau2, cgau3, cgau4, cgau5, cgau6, cgau7, cgau8
\item \textbf{shan family}: shan
\item \textbf{fbsp family}: fbsp
\item \textbf{cmor family}: cmor
\end{itemize}"""))
return specs
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately intialize a time-series analysis object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.InputParams, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['family'] = spec.findFirst('family').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
This function utilizes the Discrete Wavelet Transform to
characterize a time-dependent series of data.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
# TODO extend to continuous wavelet transform
try:
import pywt
except ModuleNotFoundError:
print("This RAVEN TSA Module requires the PYWAVELETS library to be installed in the current python environment")
raise ModuleNotFoundError
    ## The pivot input parameter isn't used explicitly in the
    ## transformation, as it is assumed/required that each element in the
## time-dependent series is independent, uniquely indexed and
## sorted in time.
family = settings['family']
params = {target: {'results': {}} for target in targets}
for i, target in enumerate(targets):
results = params[target]['results']
results['coeff_a'], results['coeff_d'] = pywt.dwt(signal[:, i], family)
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
# FIXME we don't know a priori how many entries will be in the decomp, so we can't register it yet!
raise NotImplementedError('Cannot predict variables for Wavelet!')
    # NOTE: the code below is currently unreachable (see the raise above); it
    # is placeholder logic for when the number of decomposition entries is known.
    names = []
    for target in settings['target']:
      base = f'{self.name}__{target}'
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
# FIXME we don't know a priori how many entries will be in the decomp, so we can't register it yet!
raise NotImplementedError('Cannot predict variables for Wavelet!')
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, values in info['results'].items():
for v, val in enumerate(values):
rlz[f'{base}__{name}__{v}'] = val
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as otained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic ARMA signal
"""
try:
import pywt
except ModuleNotFoundError:
print("This RAVEN TSA Module requires the PYWAVELETS library to be installed in the current python environment")
raise ModuleNotFoundError
synthetic = np.zeros((len(pivot), len(params)))
family = settings['family']
for t, (target, _) in enumerate(params.items()):
results = params[target]['results']
cA = results['coeff_a']
cD = results['coeff_d']
synthetic[:, t] = pywt.idwt(cA, cD, family)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['results'].items():
base.append(xmlUtils.newNode(name, text=','.join([str(v) for v in value])))
| [
[
[
680,
691
],
[
7317,
7319
]
],
[
[
711,
720
],
[
1569,
1578
]
],
[
[
722,
732
],
[
1636,
1646
]
],
[
[
734,
742
],
[
7956,
7964
],
[
8078,
8086
]
],
[
[
775,
794
],
[
854,
873
]
],
[
[
796,
819
],
[
875,
898
]
],
[
[
846,
853
],
[
1241,
1248
]
]
] |
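Outside of the RAVEN plumbing, the core of characterize()/generate() above is a single-level discrete wavelet transform and its inverse. A minimal stand-alone round trip with pywavelets (signal and wavelet family chosen arbitrarily):
import numpy as np
import pywt

signal = np.sin(np.linspace(0, 4 * np.pi, 64))
cA, cD = pywt.dwt(signal, 'db4')     # approximation/detail coefficients, as stored per target
rebuilt = pywt.idwt(cA, cD, 'db4')   # what generate() emits per target
assert np.allclose(signal, rebuilt[:len(signal)])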
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
from selenium.common import exceptions
from selenium.webdriver.common import by
class ElementTable(tables.TableRegion):
name = "element"
CREATE_FORM_FIELDS = ()
EDIT_FORM_FIELDS = ()
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.CREATE_FORM_FIELDS)
@tables.bind_table_action('delete')
def delete(self, delete_button):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('edit', primary=True)
def edit(self, edit_button, row):
edit_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.EDIT_FORM_FIELDS)
class SubnetsTable(ElementTable):
name = "subnets"
CREATE_FORM_FIELDS = (("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
EDIT_FORM_FIELDS = CREATE_FORM_FIELDS
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.CREATE_FORM_FIELDS)
@tables.bind_row_action('edit')
def edit(self, edit_button):
edit_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.EDIT_FORM_FIELDS)
class NetworksTable(ElementTable):
name = "networks"
CREATE_FORM_FIELDS = (("net_name", "admin_state", "shared",
"with_subnet"),
("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
EDIT_FORM_FIELDS = ("name", "network_id", "admin_state",
"shared")
ADD_SUBNET_FORM_FIELDS = (("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.CREATE_FORM_FIELDS)
@tables.bind_row_action('subnet')
def edit_add_subnet(self, edit_button, row):
edit_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.ADD_SUBNET_FORM_FIELDS)
@tables.bind_row_action('delete')
def edit_delete_network(self, delete_button, row):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
class NetworksPage(basepage.BaseNavigationPage):
DEFAULT_ADMIN_STATE = 'True'
DEFAULT_CREATE_SUBNET = True
DEFAULT_IP_VERSION = '4'
DEFAULT_DISABLE_GATEWAY = False
DEFAULT_ENABLE_DHCP = True
NETWORKS_TABLE_NAME_COLUMN = 'name'
NETWORKS_TABLE_STATUS_COLUMN = 'status'
SUBNET_TAB_INDEX = 1
DETAILS_TAB_INDEX = 2
def __init__(self, driver, conf):
super(NetworksPage, self).__init__(driver, conf)
self._page_title = "Networks"
def _get_row_with_network_name(self, name):
return self.networks_table.get_row(
self.NETWORKS_TABLE_NAME_COLUMN, name)
@property
def networks_table(self):
return NetworksTable(self.driver, self.conf)
def create_network(self, network_name, subnet_name,
admin_state=DEFAULT_ADMIN_STATE,
create_subnet=DEFAULT_CREATE_SUBNET,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
create_network_form = self.networks_table.create()
create_network_form.net_name.text = network_name
create_network_form.admin_state.value = admin_state
if not create_subnet:
create_network_form.with_subnet.unmark()
else:
create_network_form.switch_to(self.SUBNET_TAB_INDEX)
create_network_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
create_network_form.cidr.text = network_address
create_network_form.ip_version.value = ip_version
if gateway_ip is not None:
create_network_form.gateway_ip.text = gateway_ip
if disable_gateway:
create_network_form.disable_gateway.mark()
create_network_form.switch_to(self.DETAILS_TAB_INDEX)
if not enable_dhcp:
create_network_form.enable_dhcp.unmark()
if allocation_pools is not None:
create_network_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
create_network_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
create_network_form.host_routes.text = host_routes
create_network_form.submit()
def delete_network(self, name):
row = self._get_row_with_network_name(name)
confirm_delete_networks_form = \
self.networks_table.edit_delete_network(row)
confirm_delete_networks_form.submit()
def is_network_present(self, name):
return bool(self._get_row_with_network_name(name))
def is_network_active(self, name):
def cell_getter():
row = self._get_row_with_network_name(name)
return row and row.cells[self.NETWORKS_TABLE_STATUS_COLUMN]
return bool(self.networks_table.wait_cell_status(cell_getter,
'Active'))
def add_subnet(self, net_name, subnet_name,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
row = self._get_row_with_network_name(net_name)
add_subnet_form = self.networks_table.edit_add_subnet(row)
add_subnet_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
add_subnet_form.cidr.text = network_address
add_subnet_form.ip_version.value = ip_version
if gateway_ip is not None:
add_subnet_form.gateway_ip.text = gateway_ip
if disable_gateway:
add_subnet_form.disable_gateway.mark()
add_subnet_form.switch_to(self.SUBNET_TAB_INDEX)
if not enable_dhcp:
add_subnet_form.enable_dhcp.unmark()
if allocation_pools is not None:
add_subnet_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
add_subnet_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
add_subnet_form.host_routes.text = host_routes
add_subnet_form.submit()
return NetworkOverviewPage(self.driver, self.conf, net_name)
def go_to_overview(self, name):
_network_items_locator = (by.By.CSS_SELECTOR, 'a[href$="/detail"]')
net_items = self._get_elements(*_network_items_locator)
for item in net_items:
if item.text == name:
item.click()
break
else:
raise exceptions.NoSuchElementException(
"Not found element with text: %s" % name)
return NetworkOverviewPage(self.driver, self.conf, name)
class NetworkOverviewPage(basepage.BaseNavigationPage):
DEFAULT_ADMIN_STATE = 'True'
DEFAULT_IP_VERSION = '4'
DEFAULT_DISABLE_GATEWAY = False
DEFAULT_ENABLE_DHCP = True
DETAILS_TAB_INDEX = 1
TABLE_NAME_COLUMN = 'name'
_edit_network_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > a.btn:nth-child(1)')
_dropdown_open_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > a.btn:nth-child(2)')
_dropdown_menu_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > ul.row_actions > li > *')
def __init__(self, driver, conf, network_name):
super(NetworkOverviewPage, self).__init__(driver, conf)
self._page_title = "Network Details: {}".format(network_name)
@property
def subnets_table(self):
return SubnetsTable(self.driver, self.conf)
def _get_row_with_name(self, name, table):
return table.get_row(self.TABLE_NAME_COLUMN, name)
def _get_row_action(self, action_name):
open_dropdown_elem = self._get_element(*self._dropdown_open_locator)
open_dropdown_elem.click()
for action in self._get_elements(*self._dropdown_menu_locator):
pattern = "__action_%s" % action_name
if action.get_attribute('id').endswith(pattern):
action_element = action
break
return action_element
def delete_network(self):
delete_elem = self._get_row_action('delete')
delete_elem.click()
confirm_delete_network_form = forms.BaseFormRegion(self.driver,
self.conf)
confirm_delete_network_form.submit()
return NetworksPage(self.driver, self.conf)
def create_subnet(self, subnet_name,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
create_subnet_form = self.subnets_table.create()
create_subnet_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
create_subnet_form.cidr.text = network_address
create_subnet_form.ip_version.value = ip_version
if gateway_ip is not None:
create_subnet_form.gateway_ip.text = gateway_ip
if disable_gateway:
create_subnet_form.disable_gateway.mark()
create_subnet_form.tabs.switch_to(self.DETAILS_TAB_INDEX)
if not enable_dhcp:
create_subnet_form.enable_dhcp.unmark()
if allocation_pools is not None:
create_subnet_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
create_subnet_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
create_subnet_form.host_routes.text = host_routes
create_subnet_form.submit()
def delete_subnet(self, name):
row = self._get_row_with_name(name, self.subnets_table)
row.mark()
confirm_delete_subnet_form = self.subnets_table.delete()
confirm_delete_subnet_form.submit()
def is_subnet_present(self, name):
return bool(self._get_row_with_name(name, self.subnets_table))
| [
[
[
634,
642
],
[
3882,
3890
],
[
9085,
9093
]
],
[
[
706,
711
],
[
1102,
1107
],
[
1338,
1343
],
[
1515,
1520
],
[
2093,
2098
],
[
2316,
2321
],
[
3322,
3327
],
[
3563,
3568
],
[
3816,
3821
],
[
10649,
10654
]
],
[
[
775,
781
],
[
883,
889
],
[
985,
991
],
[
1221,
1227
],
[
1389,
1395
],
[
1976,
1982
],
[
2209,
2215
],
[
3205,
3211
],
[
3438,
3444
],
[
3683,
3689
]
],
[
[
810,
820
],
[
8899,
8909
]
],
[
[
859,
861
],
[
9339,
9341
],
[
9463,
9465
],
[
9587,
9589
],
[
8644,
8646
]
],
[
[
870,
882
],
[
1647,
1659
],
[
2446,
2458
]
],
[
[
1634,
1646
],
[
9923,
9935
]
],
[
[
2432,
2445
],
[
4547,
4560
]
],
[
[
3869,
3881
],
[
4262,
4274
],
[
10813,
10825
]
],
[
[
9065,
9084
],
[
8519,
8538
],
[
9007,
9026
],
[
9744,
9763
]
]
] |
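A hedged sketch of how an integration test might drive the page objects above; `driver` and `conf` are assumed to come from the openstack_dashboard integration-test harness and are not defined here.
page = NetworksPage(driver, conf)
page.create_network('net-1', 'subnet-1', network_address='10.0.0.0/24')
assert page.is_network_present('net-1')

overview = page.go_to_overview('net-1')            # NetworkOverviewPage
overview.create_subnet('subnet-2', network_address='10.0.1.0/24')
page = overview.delete_network()                   # back to NetworksPage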
from hikyuu import PG_FixedPercent
# Part author
author = "fasiondog"
# Version
version = '20200825'
def part(p=0.2):
return PG_FixedPercent(p)
part.__doc__ = PG_FixedPercent.__doc__
if __name__ == '__main__':
print(part()) | [
[
[
19,
34
],
[
157,
172
],
[
121,
136
]
],
[
[
43,
49
]
],
[
[
70,
77
]
],
[
[
97,
101
],
[
142,
146
],
[
219,
223
]
]
] |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_flex_volume_source import V1FlexVolumeSource
class TestV1FlexVolumeSource(unittest.TestCase):
""" V1FlexVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1FlexVolumeSource(self):
"""
Test V1FlexVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_flex_volume_source.V1FlexVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
| [
[
[
281,
296
]
],
[
[
305,
307
]
],
[
[
315,
318
]
],
[
[
326,
334
],
[
518,
526
],
[
964,
972
]
],
[
[
343,
360
]
],
[
[
396,
408
]
],
[
[
468,
486
]
],
[
[
495,
517
]
]
] |
from fineract.objects.fineract_object import DataFineractObject
from fineract.objects.types import Type
class Group(DataFineractObject):
"""
This class represents a Group.
"""
def __repr__(self):
return self.get__repr__({'group_id': self.id})
def _init_attributes(self):
self.id = None
self.account_no = None
self.external_id = None
self.name = None
self.status = None
self.active = None
self.activation_date = None
self.office_id = None
self.office_name = None
self.hierarchy = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.account_no = attributes.get('accountNo', None)
self.external_id = attributes.get('externalId', None)
self.name = attributes.get('name', None)
self.status = self._make_fineract_object(GroupStatus, attributes.get('status', None))
self.active = attributes.get('active', None)
self.activation_date = self._make_date_object(attributes.get('activationDate', None))
self.office_id = attributes.get('officeId', None)
self.office_name = attributes.get('officeName', None)
self.hierarchy = attributes.get('hierarchy', None)
def add_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=associateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
def remove_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=disassociateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
@classmethod
def create(cls, request_handler, name, office_id, active=True, activation_date=None):
"""Create a group
:param request_handler:
:param name:
:param office_id:
:param active:
:param activation_date:
:rtype: :class:`fineract.objects.group.Group`
"""
data = {
'name': name,
'officeId': office_id,
'active': active,
'activationDate': activation_date or cls._get_current_date()
}
res = request_handler.make_request(
'POST',
'/groups',
json=data
)
group_id = res['groupId']
return cls(request_handler,
request_handler.make_request(
'GET',
'/groups/{}'.format(group_id)
), False)
@classmethod
def get_group_by_name(cls, request_handler, name):
"""Get a group by name
:param request_handler:
:param name:
:rtype: :class:`fineract.objects.group.Group`
"""
data = request_handler.make_request(
'GET',
'/groups'
)
if data:
for item in data:
if item['name'] == name:
print(item)
return cls(request_handler, item, False)
return None
class GroupStatus(Type):
"""
This class represents a Group status.
"""
pass
| [
[
[
45,
63
],
[
118,
136
]
],
[
[
99,
103
],
[
3355,
3359
]
],
[
[
112,
117
]
],
[
[
3343,
3354
],
[
904,
915
]
]
] |
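A usage sketch for the Group class above. `handler` stands for any object exposing the make_request(method, url, json=...) interface used by the class (in practice the request handler of an authenticated fineract client); the group name, office id, and member ids are placeholders.
group = Group.create(handler, name='Farmers Coop', office_id=1)
group.add_members([12, 15])                        # associate clients by id
same = Group.get_group_by_name(handler, 'Farmers Coop')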
import logging
import requests
from django.conf import settings
from .base import BaseSmsClient
logger = logging.getLogger("notifier")
class CGSmsClient(BaseSmsClient):
@classmethod
def send(cls, number: str, text: str, **kwargs):
sub_account = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT"]
sub_account_pass = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT_PASSWORD"]
params = {
"sub_account": sub_account,
"sub_account_pass": sub_account_pass,
"action": "send_sms",
"message": text,
"recipients": number,
}
res = requests.get("http://cheapglobalsms.com/api_v1", params=params)
return res
| [
[
[
7,
14
],
[
108,
115
]
],
[
[
23,
31
],
[
651,
659
]
],
[
[
56,
64
],
[
267,
275
],
[
353,
361
]
],
[
[
84,
97
],
[
158,
171
]
],
[
[
99,
105
]
],
[
[
146,
157
]
]
] |
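The client above only works if the Django settings provide the gateway credentials it looks up; a sketch of the expected settings shape (keys taken from the code, values are placeholders) followed by a call:
# settings.py (placeholder values):
# NOTIFIER = {
#     "SMS": {
#         "GATEWAYS": {
#             "CGS": {
#                 "SUB_ACCOUNT": "...",
#                 "SUB_ACCOUNT_PASSWORD": "...",
#             },
#         },
#     },
# }
response = CGSmsClient.send("2348000000000", "Hello from the notifier")
print(response.status_code)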
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
Value to clip the 'cs' value to. Disable by setting to negative value.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 / 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
b_grad) = gen_lstm_ops.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [
None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad
]
class LSTMBlockCell(LayerRNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add `forget_bias` (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much
faster. The weight and bias matrices should be compatible as long as the
variable scope matches.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
name="lstm_cell"):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.nn.rnn_cell.LSTMCell`.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMBlockCell instead.
"""
super(LSTMBlockCell, self).__init__(_reuse=reuse, name=name)
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._cell_clip = cell_clip if cell_clip is not None else -1
self._names = {
"W": "kernel",
"b": "bias",
"wci": "w_i_diag",
"wcf": "w_f_diag",
"wco": "w_o_diag",
"scope": "lstm_cell"
}
# Inputs must be 2-dimensional.
self.input_spec = base_layer.InputSpec(ndim=2)
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if not inputs_shape[1].value:
raise ValueError(
"Expecting inputs_shape[1] to be set: %s" % str(inputs_shape))
input_size = inputs_shape[1].value
self._kernel = self.add_variable(
self._names["W"], [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
self._names["b"], [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units])
self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units])
self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units])
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM)."""
if len(state) != 2:
raise ValueError("Expecting state to be a tuple with length 2.")
if self._use_peephole:
wci = self._w_i_diag
wcf = self._w_f_diag
wco = self._w_o_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = state
(_, cs, _, _, _, _, h) = _lstm_block_cell(
inputs,
cs_prev,
h_prev,
self._kernel,
self._bias,
wci=wci,
wcf=wcf,
wco=wco,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
return h, new_state
class LSTMBlockWrapper(base_layer.Layer):
"""This is a helper class that provides housekeeping for LSTM cells.
This may be useful for alternative LSTM and similar type of cells.
The subclasses must implement `_call_cell` method and `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def call(self, inputs, initial_state=None, dtype=None, sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
or a list of `time_len` tensors of shape `[batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" % inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError("Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(
inputs, initial_cell_state, initial_output, dtype, sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(sequence_length, time_len, dtype=dtype),
[1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
# Prepend initial states to cell_states and outputs for indexing to work
# correctly,since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unstack(outputs)
final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
This is an extremely efficient LSTM implementation, that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
name="lstm_fused_cell"):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Default is no cell clipping.
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.nn.rnn_cell.LSTMCell`.
"""
super(LSTMBlockFusedCell, self).__init__(_reuse=reuse, name=name)
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip if cell_clip is not None else -1
self._use_peephole = use_peephole
# Inputs must be 3-dimensional.
self.input_spec = base_layer.InputSpec(ndim=3)
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def build(self, input_shape):
input_size = input_shape[2].value
self._kernel = self.add_variable(
"kernel", [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
"bias", [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable("w_i_diag", [self._num_units])
self._w_f_diag = self.add_variable("w_f_diag", [self._num_units])
self._w_o_diag = self.add_variable("w_o_diag", [self._num_units])
self.built = True
def _call_cell(self,
inputs,
initial_cell_state=None,
initial_output=None,
dtype=None,
sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
if self._use_peephole:
wci = self._w_i_diag
wco = self._w_o_diag
wcf = self._w_f_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=self._kernel,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
| [
[
[
739,
754
]
],
[
[
778,
786
]
],
[
[
810,
824
]
],
[
[
833,
836
],
[
14833,
14836
],
[
14949,
14952
]
],
[
[
877,
889
],
[
4631,
4643
],
[
7635,
7647
],
[
8979,
8991
],
[
10361,
10373
],
[
24551,
24563
]
],
[
[
926,
932
],
[
1370,
1376
]
],
[
[
973,
979
],
[
4521,
4527
],
[
7312,
7318
],
[
7507,
7513
]
],
[
[
1020,
1023
],
[
8286,
8289
],
[
10029,
10032
],
[
18386,
18389
]
],
[
[
1061,
1079
],
[
14585,
14595
],
[
12838,
12848
],
[
22218,
22228
]
],
[
[
1114,
1123
],
[
4493,
4502
],
[
7275,
7284
],
[
7479,
7488
],
[
7699,
7708
],
[
7981,
7990
],
[
8003,
8012
],
[
8026,
8035
],
[
8055,
8064
],
[
8077,
8086
],
[
8100,
8109
],
[
8134,
8143
],
[
9354,
9363
],
[
9477,
9486
],
[
9671,
9680
],
[
14113,
14122
],
[
17370,
17379
],
[
17639,
17648
],
[
17745,
17754
],
[
17974,
17983
],
[
18001,
18010
],
[
18741,
18750
],
[
18772,
18781
],
[
18868,
18877
],
[
18894,
18903
],
[
19239,
19248
],
[
19268,
19277
],
[
19353,
19362
],
[
19382,
19391
],
[
19923,
19932
],
[
20263,
20272
],
[
20289,
20298
],
[
24138,
24147
],
[
24308,
24317
]
],
[
[
1158,
1166
],
[
13469,
13477
],
[
22644,
22652
]
],
[
[
1201,
1209
],
[
9298,
9306
],
[
9715,
9723
],
[
20225,
20233
],
[
24409,
24417
],
[
24467,
24475
],
[
24485,
24493
]
],
[
[
1244,
1250
],
[
9849,
9855
]
],
[
[
1285,
1298
],
[
1468,
1481
],
[
12915,
12928
],
[
14500,
14513
],
[
19969,
19982
]
],
[
[
1338,
1353
],
[
1398,
1413
]
],
[
[
1355,
1367
]
],
[
[
1453,
1465
],
[
10797,
10809
]
],
[
[
1564,
1580
],
[
14208,
14224
]
],
[
[
4966,
4977
]
],
[
[
8225,
8254
]
],
[
[
8328,
8346
]
],
[
[
10067,
10081
]
],
[
[
10783,
10796
],
[
12374,
12387
]
],
[
[
14568,
14584
],
[
20376,
20392
]
],
[
[
20357,
20375
],
[
21928,
21946
]
]
] |
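A minimal sketch of wiring the cells above into a graph, assuming TensorFlow 1.x where they are exposed as tf.contrib.rnn.LSTMBlockCell / LSTMBlockFusedCell; the tensor shapes are made up.
import tensorflow as tf  # TF 1.x

inputs = tf.placeholder(tf.float32, [None, 20, 8])        # [batch, time, features]

# Per-step block cell, driven by dynamic_rnn like any RNNCell.
cell = tf.contrib.rnn.LSTMBlockCell(num_units=32)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)

# Fused variant: one op over the whole (time-major) sequence.
fused = tf.contrib.rnn.LSTMBlockFusedCell(num_units=32)
fused_out, fused_state = fused(tf.transpose(inputs, [1, 0, 2]), dtype=tf.float32)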
#!/usr/bin/env python3
import argparse
import codecs
import sys
def transform(i, o):
    # For each "<key> <tokens...>" line: drop tokens that start with "<"
    # (non-lexical markers) and lowercase the remaining tokens.
for line in i:
if len(line.strip()) == 0:
continue
key, trans = line.strip().split(None, 1)
ntrans = []
for t in trans.split():
if t.startswith("<"):
continue
ntrans.append(t.lower())
print("{} {}".format(key, " ".join(ntrans)), file=o)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=codecs.getreader('utf-8')(sys.stdin.buffer))
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=codecs.getwriter('utf-8')(sys.stdout.buffer))
args = parser.parse_args()
transform(args.infile, args.outfile) | [
[
[
30,
38
],
[
462,
470
],
[
552,
560
],
[
698,
706
]
],
[
[
46,
52
],
[
602,
608
],
[
748,
754
]
],
[
[
61,
64
],
[
628,
631
],
[
774,
777
]
],
[
[
71,
80
],
[
831,
840
]
],
[
[
453,
459
],
[
506,
512
],
[
651,
657
],
[
806,
812
]
],
[
[
799,
803
],
[
841,
845
],
[
854,
858
]
]
] |
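The filter above in action on an in-memory transcript (the utterance ids and tokens are invented): tokens starting with "<" are dropped and the rest lowercased.
import io

src = io.StringIO("UTT1 <noise> HELLO World\nUTT2 <sil> GOOD-BYE\n")
dst = io.StringIO()
transform(src, dst)
print(dst.getvalue())
# UTT1 hello world
# UTT2 good-bye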
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: addressbook.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='addressbook.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11\x61\x64\x64ressbook.proto\"1\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\r\n\x05\x65mail\x18\x03 \x01(\tb\x06proto3')
)
_PERSON = _descriptor.Descriptor(
name='Person',
full_name='Person',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Person.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='Person.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email', full_name='Person.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=21,
serialized_end=70,
)
DESCRIPTOR.message_types_by_name['Person'] = _PERSON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
DESCRIPTOR = _PERSON,
__module__ = 'addressbook_pb2'
# @@protoc_insertion_point(class_scope:Person)
))
_sym_db.RegisterMessage(Person)
# @@protoc_insertion_point(module_scope)
| [
[
[
119,
122
],
[
126,
129
]
],
[
[
123,
125
],
[
641,
643
],
[
1134,
1136
],
[
1831,
1833
]
],
[
[
226,
251
],
[
509,
520
],
[
824,
835
],
[
962,
973
],
[
1322,
1333
],
[
1657,
1668
]
],
[
[
280,
299
],
[
2392,
2400
]
],
[
[
328,
353
],
[
2340,
2351
]
],
[
[
382,
417
],
[
465,
481
]
],
[
[
455,
462
],
[
2287,
2294
],
[
2529,
2536
]
],
[
[
496,
506
],
[
911,
921
],
[
1305,
1315
],
[
1640,
1650
],
[
2002,
2012
],
[
2234,
2244
],
[
2318,
2328
]
],
[
[
814,
821
],
[
2279,
2286
],
[
2433,
2440
]
],
[
[
2331,
2337
],
[
2553,
2559
]
]
] |
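A minimal usage sketch for the generated module above, assuming the file is saved as addressbook_pb2.py (the __module__ name it declares); it uses only the standard protobuf message API.
import addressbook_pb2

p = addressbook_pb2.Person(name="Ada", id=1, email="ada@example.com")
data = p.SerializeToString()      # serialize to wire-format bytes
q = addressbook_pb2.Person()
q.ParseFromString(data)           # round-trip back into a message
assert q.name == "Ada" and q.id == 1 and q.email == "ada@example.com"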
"""
Small helpers for code that is not shown in the notebooks
"""
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlabel('sepal length (cm)')
pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2*rng.rand(100) - 1
f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)') | [
[
[
87,
96
],
[
635,
644
]
],
[
[
98,
106
],
[
432,
440
]
],
[
[
108,
120
],
[
1694,
1706
],
[
1920,
1932
]
],
[
[
128,
139
],
[
1053,
1055
],
[
1069,
1071
],
[
1152,
1154
],
[
1206,
1208
],
[
1241,
1243
],
[
1275,
1277
],
[
1544,
1546
],
[
1560,
1562
],
[
1749,
1751
],
[
1975,
1977
],
[
2037,
2039
],
[
2063,
2065
],
[
2084,
2086
],
[
2142,
2144
],
[
2158,
2160
],
[
2184,
2186
],
[
2230,
2232
],
[
2251,
2253
]
],
[
[
147,
158
],
[
829,
831
],
[
841,
843
],
[
898,
900
],
[
950,
952
],
[
1338,
1340
],
[
1515,
1517
],
[
1591,
1593
],
[
1641,
1643
],
[
1815,
1817
],
[
1866,
1868
]
],
[
[
189,
203
],
[
287,
301
],
[
349,
363
]
],
[
[
274,
284
],
[
1099,
1109
]
],
[
[
337,
346
],
[
1191,
1200
]
],
[
[
404,
417
]
],
[
[
1298,
1324
]
]
] |
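A minimal driver, written as if appended to the helpers module above; it just renders the two figures the helpers build and blocks on pylab's show().
if __name__ == '__main__':
    plot_iris_knn()                 # iris 3-class kNN decision regions
    plot_polynomial_regression()    # 4th vs 9th order polynomial fits
    pl.show()                       # blocking display of the created figures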
# -*- coding: utf-8 -*-
# Scrapy settings for iwata project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'iwata'
SPIDER_MODULES = ['iwata.spiders']
NEWSPIDER_MODULE = 'iwata.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'iwata (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'iwata.middlewares.IwataSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'iwata.middlewares.IwataDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
# 'iwata.pipelines.IwataPipeline': 100,
# 'iwata.pipelines.JsonWriterPipeline': 200,
'iwata.pipelines.MarkdownWriterPipeline': 300,
'scrapy.pipelines.images.ImagesPipeline': 400,
}
IMAGES_STORE = '_images'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 2
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
FEED_EXPORT_ENCODING = 'utf-8' | [
[
[
417,
425
]
],
[
[
437,
451
]
],
[
[
472,
488
]
],
[
[
666,
680
]
],
[
[
2097,
2111
]
],
[
[
2299,
2311
]
],
[
[
3029,
3046
]
],
[
[
3223,
3243
]
]
] |
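The settings above enable 'iwata.pipelines.MarkdownWriterPipeline', which is not included in this dump. Below is a hedged sketch of what such a pipeline could look like using only the standard Scrapy item-pipeline hooks; the output path and the 'title'/'body' item fields are assumptions.
class MarkdownWriterPipeline:
    def open_spider(self, spider):
        # output filename is an assumption, not taken from the project
        self.file = open('items.md', 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        title = item.get('title', '')   # assumed item fields
        body = item.get('body', '')
        self.file.write(f"# {title}\n\n{body}\n\n")
        return item                     # pipelines must return the item (or raise DropItem)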
# Copyright 2020 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: BlueCat Networks
# Date: 2021-08-23
# Gateway Version: 20.12.1
# Description: Example Gateway workflow
"""
Update host record form
"""
from wtforms import SubmitField
from bluecat.wtform_fields import (
Configuration,
View,
Zone,
HostRecord,
CustomStringField,
PlainHTML,
CustomBooleanField,
)
from bluecat.server_endpoints import get_host_records_endpoint
from bluecat.wtform_extensions import GatewayForm
class GenericFormTemplate(GatewayForm):
"""Form to generate HTML and Javascript for the update_host_record workflow
Note:
When updating the form, remember to make the corresponding changes to the workflow pages
"""
workflow_name = "update_host_record"
workflow_permission = "update_host_record_page"
configuration = Configuration(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Configuration",
required=True,
coerce=int,
clear_below_on_change=False,
is_disabled_on_start=False,
on_complete=["call_view"],
enable_dependencies={"on_complete": ["view"]},
disable_dependencies={"on_change": ["view"]},
clear_dependencies={"on_change": ["view"]},
)
view = View(
workflow_name=workflow_name,
permissions=workflow_permission,
label="View",
required=True,
one_off=True,
clear_below_on_change=False,
enable_dependencies={"on_complete": ["parent_zone"]},
disable_dependencies={"on_change": ["parent_zone"]},
clear_dependencies={"on_change": ["parent_zone"]},
should_cascade_disable_on_change=True,
should_cascade_clear_on_change=True,
)
parent_zone = Zone(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Zone",
required=True,
start_initialized=True,
inputs={"zone": "parent_zone", "configuration": "configuration", "view": "view"},
clear_below_on_change=False,
enable_dependencies={"on_complete": ["host_record"]},
disable_dependencies={"on_change": ["host_record"]},
clear_dependencies={"on_change": ["host_record", "name", "ip4_address"]},
should_cascade_disable_on_change=True,
should_cascade_clear_on_change=True,
)
host_record = HostRecord(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Host Record",
required=True,
inputs={
"configuration": "configuration",
"view": "view",
"parent_zone": "parent_zone",
"host_record": "host_record",
},
server_outputs={"on_complete": {"name": "name", "addresses": "ip4_address"}},
server_side_output_method=get_host_records_endpoint,
clear_below_on_change=False,
enable_dependencies={"on_complete": ["submit", "name", "ip4_address", "deploy_now"]},
disable_dependencies={"on_change": ["submit", "name", "ip4_address", "deploy_now"]},
should_cascade_disable_on_change=True,
)
separator = PlainHTML("<hr>")
name = CustomStringField(label="New Host Name", required=True)
ip4_address = CustomStringField(
label="IPv4 Address (multiple IPv4 addresses must be separated by a comma)", required=True
)
deploy_now = CustomBooleanField(label="Deploy Now")
submit = SubmitField(label="Update")
| [
[
[
770,
781
],
[
4022,
4033
]
],
[
[
822,
835
],
[
1402,
1415
]
],
[
[
841,
845
],
[
1856,
1860
]
],
[
[
851,
855
],
[
2343,
2347
]
],
[
[
861,
871
],
[
2953,
2963
]
],
[
[
877,
894
],
[
3752,
3769
],
[
3827,
3844
]
],
[
[
900,
909
],
[
3722,
3731
]
],
[
[
915,
933
],
[
3969,
3987
]
],
[
[
974,
999
],
[
3401,
3426
]
],
[
[
1038,
1049
],
[
1078,
1089
]
],
[
[
1058,
1077
]
]
] |
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_catalog_namespace_facts
short_description: Fetches details about one or multiple Namespace resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Namespace resources in Oracle Cloud Infrastructure
- Returns a list of namespaces within a data catalog.
- If I(namespace_id) is specified, the details of a single Namespace will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
catalog_id:
description:
- Unique catalog identifier.
type: str
required: true
namespace_id:
description:
- Unique namespace identifier.
- Required to get a specific namespace.
type: str
aliases: ["id"]
fields:
description:
- Specifies the fields to return in a namespace response.
type: list
elements: str
choices:
- "key"
- "displayName"
- "description"
- "lifecycleState"
- "timeCreated"
- "timeUpdated"
- "createdById"
- "updatedById"
- "properties"
display_name:
description:
- A filter to return only resources that match the entire display name given. The match is not case sensitive.
type: str
aliases: ["name"]
display_name_contains:
description:
- "A filter to return only resources that match display name pattern given. The match is not case sensitive.
For Example : /folders?displayNameContains=Cu.*
The above would match all folders with display name that starts with \\"Cu\\" or has the pattern \\"Cu\\" anywhere in between."
type: str
lifecycle_state:
description:
- A filter to return only resources that match the specified lifecycle state. The value is case insensitive.
type: str
choices:
- "CREATING"
- "ACTIVE"
- "INACTIVE"
- "UPDATING"
- "DELETING"
- "DELETED"
- "FAILED"
- "MOVING"
time_created:
description:
- Time that the resource was created. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
type: str
time_updated:
description:
- Time that the resource was updated. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
type: str
created_by_id:
description:
- OCID of the user who created the resource.
type: str
updated_by_id:
description:
- OCID of the user who updated the resource.
type: str
sort_by:
description:
- The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is
ascending. If no value is specified TIMECREATED is default.
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either 'asc' or 'desc'.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific namespace
oci_data_catalog_namespace_facts:
# required
catalog_id: "ocid1.catalog.oc1..xxxxxxEXAMPLExxxxxx"
namespace_id: "ocid1.namespace.oc1..xxxxxxEXAMPLExxxxxx"
# optional
fields: [ "key" ]
- name: List namespaces
oci_data_catalog_namespace_facts:
# required
catalog_id: "ocid1.catalog.oc1..xxxxxxEXAMPLExxxxxx"
# optional
fields: [ "key" ]
display_name: display_name_example
display_name_contains: display_name_contains_example
lifecycle_state: CREATING
time_created: 2013-10-20T19:20:30+01:00
time_updated: 2013-10-20T19:20:30+01:00
created_by_id: "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx"
updated_by_id: "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
sort_by: TIMECREATED
sort_order: ASC
"""
RETURN = """
namespaces:
description:
- List of Namespace resources
returned: on success
type: complex
contains:
key:
description:
- Unique namespace key that is immutable.
returned: on success
type: str
sample: key_example
display_name:
description:
- Name of the Namespace
returned: on success
type: str
sample: display_name_example
description:
description:
- Description for the namespace
returned: on success
type: str
sample: description_example
is_service_defined:
description:
- If this field is defined by service or by a user
returned: on success
type: bool
sample: true
lifecycle_state:
description:
- The current state of the namespace.
returned: on success
type: str
sample: CREATING
time_created:
description:
- "The date and time the namespace was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: `2019-03-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The last time that any change was made to the namespace. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
- Returned for get operation
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
created_by_id:
description:
- OCID of the user who created the namespace.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx"
updated_by_id:
description:
- OCID of the user who last modified the namespace.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"key": "key_example",
"display_name": "display_name_example",
"description": "description_example",
"is_service_defined": true,
"lifecycle_state": "CREATING",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"created_by_id": "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx",
"updated_by_id": "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.data_catalog import DataCatalogClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataCatalogNamespaceFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"catalog_id",
"namespace_id",
]
def get_required_params_for_list(self):
return [
"catalog_id",
]
def get_resource(self):
optional_get_method_params = [
"fields",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.get_namespace,
catalog_id=self.module.params.get("catalog_id"),
namespace_id=self.module.params.get("namespace_id"),
**optional_kwargs
)
def list_resources(self):
optional_list_method_params = [
"display_name",
"display_name_contains",
"lifecycle_state",
"time_created",
"time_updated",
"created_by_id",
"updated_by_id",
"sort_by",
"sort_order",
"fields",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_namespaces,
catalog_id=self.module.params.get("catalog_id"),
**optional_kwargs
)
DataCatalogNamespaceFactsHelperCustom = get_custom_class(
"DataCatalogNamespaceFactsHelperCustom"
)
class ResourceFactsHelper(
DataCatalogNamespaceFactsHelperCustom, DataCatalogNamespaceFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
catalog_id=dict(type="str", required=True),
namespace_id=dict(aliases=["id"], type="str"),
fields=dict(
type="list",
elements="str",
choices=[
"key",
"displayName",
"description",
"lifecycleState",
"timeCreated",
"timeUpdated",
"createdById",
"updatedById",
"properties",
],
),
display_name=dict(aliases=["name"], type="str"),
display_name_contains=dict(type="str"),
lifecycle_state=dict(
type="str",
choices=[
"CREATING",
"ACTIVE",
"INACTIVE",
"UPDATING",
"DELETING",
"DELETED",
"FAILED",
"MOVING",
],
),
time_created=dict(type="str"),
time_updated=dict(type="str"),
created_by_id=dict(type="str"),
updated_by_id=dict(type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="namespace",
service_client_class=DataCatalogClient,
namespace="data_catalog",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(namespaces=result)
if __name__ == "__main__":
main()
| [
[
[
421,
436
]
],
[
[
438,
446
]
],
[
[
448,
462
]
],
[
[
464,
477
]
],
[
[
486,
502
]
],
[
[
601,
614
]
],
[
[
3959,
3967
]
],
[
[
4774,
4780
]
],
[
[
7604,
7617
],
[
11353,
11366
]
],
[
[
7682,
7698
],
[
8621,
8637
],
[
9438,
9454
],
[
9874,
9890
]
],
[
[
7788,
7814
],
[
8014,
8040
]
],
[
[
7820,
7836
],
[
9659,
9675
]
],
[
[
7879,
7896
],
[
11632,
11649
]
],
[
[
7902,
7916
],
[
11406,
11420
]
],
[
[
7948,
7962
],
[
11406,
11420
]
],
[
[
7979,
8013
],
[
9795,
9829
]
],
[
[
9619,
9656
],
[
9756,
9793
]
],
[
[
9731,
9750
],
[
11524,
11543
]
],
[
[
9848,
9852
],
[
12004,
12008
]
]
] |
from app import app
app.run(app.config['HOST'], app.config['PORT'], app.config['DEBUG'])
| [
[
[
16,
19
],
[
20,
23
],
[
28,
31
],
[
48,
51
],
[
68,
71
]
]
] |
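The runner above reads HOST, PORT and DEBUG from app.config, but the app package itself is not in this dump. A hedged, Flask-style sketch of an app/__init__.py that would satisfy it; Flask and the default values are assumptions based only on those three keys and the positional app.run() call.
from flask import Flask

app = Flask(__name__)
app.config.update(HOST='0.0.0.0', PORT=5000, DEBUG=True)  # keys the runner reads; values assumed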
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Fortnite replay api blueprint
"""
| [] |
# NOTE: this test module is assumed to be executed by the Sahana Eden/web2py test
# harness, which injects load_module, current, db, session and request into the
# namespace; datetime is imported explicitly so the calls below are self-contained.
import datetime
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
s3gis = s3gis_tests.s3gis
def test_KMLLayer():
current.session.s3.debug = True
current.request.utcnow = datetime.datetime.now()
s3gis_tests.layer_test(
db,
db.gis_layer_kml,
dict(
name = "Test KML",
description = "Test KML layer",
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
url = "test://test_KML",
),
"S3.gis.layers_kml",
[
{
"marker_height": 34,
"marker_image": u"gis_marker.image.marker_red.png",
"marker_width": 20,
"name": u"Test KML",
# this shows that caching is OK:
"url": u"/eden/default/download/gis_cache2.file.Test_20KML.kml"
}
],
session = session,
request = request,
)
def test_KMLCaching_not_possible():
import os.path
import sys
class Mock(object):
pass
mock_stderr = Mock()
buffer = []
def mock_write(error_message):
buffer.append(error_message)
mock_stderr.write = mock_write
with s3gis_tests.Change(
os.path,
{
"exists": lambda *a, **kw: False
}
):
with s3gis_tests.Change(
sys,
{
"stderr": mock_stderr
}
):
with s3gis_tests.Change(
current.session.s3,
{
"debug": False
}
):
kml_layer = s3gis.KMLLayer(s3gis.GIS())
js = kml_layer.as_javascript()
assert session.error.startswith(
"GIS: KML layers cannot be cached: "
)
assert "GIS: KML layers cannot be cached:" in buffer[0]
| [
[
[
1,
12
],
[
72,
83
],
[
205,
216
],
[
1257,
1268
],
[
1379,
1390
],
[
1510,
1521
]
],
[
[
64,
69
],
[
1680,
1685
],
[
1695,
1700
]
],
[
[
95,
108
]
],
[
[
987,
1015
]
]
] |
from .models import Character, Faction, Ship
__author__ = 'ekampf'
def initialize():
human = Character(name='Human')
human.put()
droid = Character(name='Droid')
droid.put()
rebels = Faction(id="rebels", name='Alliance to Restore the Republic', hero_key=human.key)
rebels.put()
empire = Faction(id="empire", name='Galactic Empire', hero_key=droid.key)
empire.put()
xwing = Ship(name='X-Wing', faction_key=rebels.key)
xwing.put()
ywing = Ship(name='Y-Wing', faction_key=rebels.key)
ywing.put()
awing = Ship(name='A-Wing', faction_key=rebels.key)
awing.put()
# Yeah, technically it's Corellian. But it flew in the service of the rebels,
# so for the purposes of this demo it's a rebel ship.
falcon = Ship(name='Millenium Falcon', faction_key=rebels.key)
falcon.put()
homeOne = Ship(name='Home One', faction_key=rebels.key)
homeOne.put()
tieFighter = Ship(name='TIE Fighter', faction_key=empire.key)
tieFighter.put()
tieInterceptor = Ship(name='TIE Interceptor', faction_key=empire.key)
tieInterceptor.put()
executor = Ship(name='Executor', faction_key=empire.key)
executor.put()
def create_ship(ship_name, faction_key):
new_ship = Ship(name=ship_name, faction_key=faction_key)
new_ship.put()
return new_ship
| [
[
[
21,
30
],
[
101,
110
],
[
154,
163
]
],
[
[
32,
39
],
[
208,
215
],
[
321,
328
]
],
[
[
41,
45
],
[
416,
420
],
[
489,
493
],
[
562,
566
],
[
776,
780
],
[
862,
866
],
[
944,
948
],
[
1036,
1040
],
[
1130,
1134
],
[
1253,
1257
]
],
[
[
47,
57
]
],
[
[
75,
85
]
],
[
[
1201,
1212
]
]
] |
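A minimal usage sketch for the seed helpers above. The models module is not shown here; the .put()/.key usage suggests App Engine ndb models, so an active datastore context (or a testbed stub) is assumed.
initialize()                                    # seed factions and the canonical ships
rebels = Faction.get_by_id("rebels")            # standard ndb.Model lookup by string id
snowspeeder = create_ship("Snowspeeder", rebels.key)
print(snowspeeder.name, snowspeeder.faction_key)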
import logging
import time
import json
from collections import defaultdict
import tqdm
import click
from django.utils import timezone
from django.db import transaction, connection
from django.db.models import Q
from django.contrib.auth import get_user_model
import rssant_common.django_setup # noqa:F401
from rssant_api.models import Feed, Story, UnionFeed, UserStory, UserFeed
from rssant_api.helper import reverse_url
from rssant_common import _proxy_helper
from rssant_common.helper import format_table, pretty_format_json
from rssant_feedlib.reader import FeedResponseStatus, FeedReader
from rssant_common import unionid
from rssant_feedlib import processor
from rssant_common.actor_client import scheduler
from rssant_config import CONFIG
LOG = logging.getLogger(__name__)
@click.group()
def main():
"""RSS Commands"""
def _decode_feed_ids(option_feeds):
"""
>>> _decode_feed_ids('123,456')
[123, 456]
"""
return [int(x) for x in option_feeds.strip().split(',')]
def _decode_union_feed_ids(option_feeds):
"""
>>> _decode_union_feed_ids('014064,0140be')
[196, 366]
"""
return [unionid.decode(x)[1] for x in option_feeds.strip().split(',')]
def _get_all_feed_ids():
feed_ids = [feed.id for feed in Feed.objects.only('id').all()]
return feed_ids
def _get_feed_ids(option_feeds):
if option_feeds and option_feeds != 'all':
feed_ids = _decode_feed_ids(option_feeds)
else:
feed_ids = _get_all_feed_ids()
return feed_ids
def _get_story_ids(option_storys):
if option_storys:
story_ids = option_storys.strip().split(',')
else:
story_ids = [story.id for story in Story.objects.only('id').all()]
return story_ids
@main.command()
@click.option('--dry-run', is_flag=True)
def fix_feed_total_storys(dry_run=False):
incorrect_feeds = Story.query_feed_incorrect_total_storys()
LOG.info('total %s incorrect feeds', len(incorrect_feeds))
header = ['feed_id', 'total_storys', 'correct_total_storys']
click.echo(format_table(incorrect_feeds, header=header))
if dry_run:
return
with transaction.atomic():
num_corrected = 0
for feed_id, *__ in tqdm.tqdm(incorrect_feeds, ncols=80, ascii=True):
fixed = Story.fix_feed_total_storys(feed_id)
if fixed:
num_corrected += 1
LOG.info('correct %s feeds', num_corrected)
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_monthly_story_count(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
Story.refresh_feed_monthly_story_count(feed_id)
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_dryness(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
feed = Feed.get_by_pk(feed_id)
if feed.total_storys <= 0:
continue
cnt = feed.monthly_story_count
if not cnt:
Story.refresh_feed_monthly_story_count(feed_id)
feed.refresh_from_db()
feed.dryness = feed.monthly_story_count.dryness()
feed.save()
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_dt_first_story_published(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
feed = Feed.get_by_pk(feed_id)
if feed.dt_first_story_published:
continue
if feed.total_storys <= 0:
continue
try:
story = Story.get_by_offset(feed_id, 0, detail=True)
except Story.DoesNotExist:
LOG.warning(f'story feed_id={feed_id} offset=0 not exists')
continue
feed.dt_first_story_published = story.dt_published
feed.save()
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def update_story_has_mathjax(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
with transaction.atomic():
story = Story.objects.only('id', 'content', '_version').get(pk=story_id)
if processor.story_has_mathjax(story.content):
story.has_mathjax = True
story.save()
@main.command()
def update_story_is_user_marked():
user_storys = list(
UserStory.objects
.exclude(is_watched=False, is_favorited=False)
.all()
)
LOG.info('total %s user marked storys', len(user_storys))
if not user_storys:
return
for user_story in tqdm.tqdm(user_storys, ncols=80, ascii=True):
Story.set_user_marked_by_id(user_story.story_id)
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def process_story_links(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
with transaction.atomic():
story = Story.objects.only('id', 'content', '_version').get(pk=story_id)
content = processor.process_story_links(story.content, story.link)
if story.content != content:
story.content = content
story.save()
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def update_story_images(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
story = Story.objects.get(pk=story_id)
scheduler.tell('harbor_rss.update_story_images', dict(
story_id=story_id,
story_url=story.link,
images=[],
))
@main.command()
@click.argument('unionid_text')
def decode_unionid(unionid_text):
numbers = unionid.decode(unionid_text)
if len(numbers) == 3:
click.echo('user_id={} feed_id={} offset={}'.format(*numbers))
elif len(numbers) == 2:
click.echo('user_id={} feed_id={}'.format(*numbers))
else:
click.echo(numbers)
@main.command()
@click.option('--days', type=int, default=1)
@click.option('--limit', type=int, default=100)
@click.option('--threshold', type=int, default=99)
def delete_invalid_feeds(days=1, limit=100, threshold=99):
sql = """
SELECT feed_id, title, link, url, status_code, count FROM (
SELECT feed_id, status_code, count(1) as count FROM rssant_api_rawfeed
WHERE dt_created >= %s and (status_code < 200 or status_code >= 400)
group by feed_id, status_code
having count(1) > 3
order by count desc
limit %s
) error_feed
join rssant_api_feed
on error_feed.feed_id = rssant_api_feed.id
order by feed_id, status_code, count;
"""
sql_ok_count = """
SELECT feed_id, count(1) as count FROM rssant_api_rawfeed
WHERE dt_created >= %s and (status_code >= 200 and status_code < 400)
AND feed_id=ANY(%s)
group by feed_id
"""
t_begin = timezone.now() - timezone.timedelta(days=days)
error_feeds = defaultdict(dict)
with connection.cursor() as cursor:
cursor.execute(sql, [t_begin, limit])
for feed_id, title, link, url, status_code, count in cursor.fetchall():
error_feeds[feed_id].update(feed_id=feed_id, title=title, link=link, url=url)
error = error_feeds[feed_id].setdefault('error', {})
error_name = FeedResponseStatus.name_of(status_code)
error[error_name] = count
error_feeds[feed_id]['error_count'] = sum(error.values())
error_feeds[feed_id].update(ok_count=0, error_percent=100)
cursor.execute(sql_ok_count, [t_begin, list(error_feeds)])
for feed_id, ok_count in cursor.fetchall():
feed = error_feeds[feed_id]
total = feed['error_count'] + ok_count
error_percent = round((feed['error_count'] / total) * 100)
feed.update(ok_count=ok_count, error_percent=error_percent)
error_feeds = list(sorted(error_feeds.values(), key=lambda x: x['error_percent'], reverse=True))
delete_feed_ids = []
for feed in error_feeds:
if feed['error_percent'] >= threshold:
delete_feed_ids.append(feed['feed_id'])
click.echo(pretty_format_json(feed))
if delete_feed_ids:
confirm_delete = click.confirm(f'Delete {len(delete_feed_ids)} feeds?')
if not confirm_delete:
click.echo('Abort!')
else:
UnionFeed.bulk_delete(delete_feed_ids)
click.echo('Done!')
return error_feeds
@main.command()
def fix_user_story_offset():
sql = """
SELECT us.id, us."offset", story."offset"
FROM rssant_api_userstory AS us
LEFT OUTER JOIN rssant_api_story AS story
ON us.story_id=story.id
WHERE us."offset" != story."offset"
"""
items = []
with connection.cursor() as cursor:
cursor.execute(sql)
for us_id, us_offset, story_offset in cursor.fetchall():
items.append((us_id, us_offset, story_offset))
click.echo(f'total {len(items)} mismatch user story offset')
if not items:
return
with transaction.atomic():
for us_id, us_offset, story_offset in tqdm.tqdm(items, ncols=80, ascii=True):
UserStory.objects.filter(pk=us_id).update(offset=-us_offset)
for us_id, us_offset, story_offset in tqdm.tqdm(items, ncols=80, ascii=True):
UserStory.objects.filter(pk=us_id).update(offset=story_offset)
@main.command()
def subscribe_changelog():
changelog_url = CONFIG.root_url.rstrip('/') + '/changelog.atom'
feed = Feed.objects.get(url=changelog_url)
if not feed:
click.echo(f'not found changelog feed url={changelog_url}')
return
click.echo(f'changelog feed {feed}')
User = get_user_model()
users = list(User.objects.all())
click.echo(f'total {len(users)} users')
for user in tqdm.tqdm(users, ncols=80, ascii=True):
with transaction.atomic():
user_feed = UserFeed.objects\
.filter(user_id=user.id, feed_id=feed.id).first()
if not user_feed:
user_feed = UserFeed(
user_id=user.id,
feed_id=feed.id,
is_from_bookmark=False,
)
user_feed.save()
@main.command()
def update_feed_use_proxy():
if not CONFIG.rss_proxy_enable:
click.echo('rss proxy not enable!')
return
blacklist = [
        '%博客园%',        # cnblogs
        '%微信%',          # WeChat
        '%新浪%',          # Sina
        '%的评论%',        # "comments on ..." feeds
'%Comments on%',
]
sql = """
select * from rssant_api_feed
where (NOT title LIKE ANY(%s)) AND (
dt_created >= '2020-04-01' or
(total_storys <= 5 and dt_updated <= '2019-12-01')
)
"""
feeds = list(Feed.objects.raw(sql, [blacklist]))
click.echo(f'{len(feeds)} feeds need check')
reader = FeedReader(**_proxy_helper.get_proxy_options())
proxy_feeds = []
with reader:
for i, feed in enumerate(feeds):
click.echo(f'#{i} {feed}')
status = reader.read(feed.url).status
click.echo(f' #{i} status={FeedResponseStatus.name_of(status)}')
if FeedResponseStatus.is_need_proxy(status):
proxy_status = reader.read(feed.url, use_proxy=True).status
click.echo(f' #{i} proxy_status={FeedResponseStatus.name_of(proxy_status)}')
if proxy_status == 200:
proxy_feeds.append(feed)
click.echo(f'{len(proxy_feeds)} feeds need use proxy')
if proxy_feeds:
with transaction.atomic():
for feed in tqdm.tqdm(proxy_feeds, ncols=80, ascii=True):
feed.refresh_from_db()
feed.use_proxy = True
feed.save()
@main.command()
@click.argument('key')
def delete_feed(key):
try:
key = int(key)
except ValueError:
pass # ignore
if isinstance(key, int):
feed = Feed.get_by_pk(key)
else:
feed = Feed.objects.filter(
Q(url__contains=key) | Q(title__contains=key)
).first()
if not feed:
print(f'not found feed like {key}')
return
if click.confirm(f'delete {feed} ?'):
feed.delete()
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
@click.option('--union-feeds', help="union feed ids, separate by ','")
@click.option('--key', help="feed url or title keyword")
@click.option('--expire', type=int, default=1, help="expire hours")
def refresh_feed(feeds, union_feeds, key, expire=None):
feed_ids = []
if feeds:
feed_ids.extend(_get_feed_ids(feeds))
if union_feeds:
feed_ids.extend(_decode_union_feed_ids(union_feeds))
if key:
cond = Q(url__contains=key) | Q(title__contains=key)
feed_objs = Feed.objects.filter(cond).only('id').all()
feed_ids.extend(x.id for x in feed_objs)
feed_ids = list(sorted(set(feed_ids)))
expire_at = time.time() + expire * 60 * 60
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
feed = Feed.objects.only('id', 'url', 'use_proxy').get(pk=feed_id)
scheduler.tell('worker_rss.sync_feed', dict(
feed_id=feed.id,
url=feed.url,
use_proxy=feed.use_proxy,
is_refresh=True,
), expire_at=expire_at)
@main.command()
@click.option('--feeds', required=True, help="feed ids, separate by ','")
def update_feed_reverse_url(feeds):
feed_ids = _get_feed_ids(feeds)
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
feed = Feed.objects.get(pk=feed_id)
feed.reverse_url = reverse_url(feed.url)
feed.save()
@main.command()
@click.option('--dst', required=True, help='actor dst')
@click.option('--content', help='message content')
@click.option('--expire-seconds', type=int, help='expire time in seconds')
def tell(dst, content, expire_seconds):
if content:
content = json.loads(content)
expire_at = None
if expire_seconds:
expire_at = int(time.time()) + expire_seconds
scheduler.tell(dst, content=content, expire_at=expire_at)
if __name__ == "__main__":
main()
| [
[
[
7,
14
],
[
755,
762
]
],
[
[
22,
26
],
[
13540,
13544
],
[
14616,
14620
]
],
[
[
34,
38
],
[
14528,
14532
]
],
[
[
63,
74
],
[
7572,
7583
]
],
[
[
83,
87
],
[
2202,
2206
],
[
2645,
2649
],
[
2998,
3002
],
[
3667,
3671
],
[
4465,
4469
],
[
5060,
5064
],
[
5387,
5391
],
[
5964,
5968
],
[
9750,
9754
],
[
9909,
9913
],
[
10450,
10454
],
[
12190,
12194
],
[
13590,
13594
],
[
14098,
14102
]
],
[
[
95,
100
],
[
786,
791
],
[
1751,
1756
],
[
2437,
2442
],
[
2802,
2807
],
[
3454,
3459
],
[
4255,
4260
],
[
5182,
5187
],
[
5759,
5764
],
[
6236,
6241
],
[
6587,
6592
],
[
6632,
6637
],
[
6680,
6685
],
[
12360,
12365
],
[
12827,
12832
],
[
12886,
12891
],
[
12957,
12962
],
[
13014,
13019
],
[
13934,
13939
],
[
14273,
14278
],
[
14329,
14334
],
[
14380,
14385
],
[
2029,
2034
],
[
6378,
6383
],
[
6477,
6482
],
[
6548,
6553
],
[
8774,
8779
],
[
8860,
8865
],
[
8958,
8963
],
[
9056,
9061
],
[
9579,
9584
],
[
10209,
10214
],
[
10288,
10293
],
[
10394,
10399
],
[
10961,
10966
],
[
11384,
11389
],
[
11581,
11586
],
[
11670,
11675
],
[
11887,
11892
],
[
12056,
12061
],
[
12751,
12756
]
],
[
[
126,
134
],
[
7507,
7515
],
[
7524,
7532
]
],
[
[
157,
168
],
[
2126,
2137
],
[
2701,
2712
],
[
3054,
3065
],
[
3723,
3734
],
[
4522,
4533
],
[
5444,
5455
],
[
9682,
9693
],
[
10503,
10514
],
[
12144,
12155
]
],
[
[
170,
180
],
[
7599,
7609
],
[
9392,
9402
]
],
[
[
210,
211
],
[
12604,
12605
],
[
12627,
12628
],
[
13323,
13324
],
[
13346,
13347
]
],
[
[
244,
258
],
[
10336,
10350
]
],
[
[
267,
293
]
],
[
[
337,
341
],
[
1262,
1266
],
[
3095,
3099
],
[
3764,
3768
],
[
10148,
10152
],
[
11344,
11348
],
[
12526,
12530
],
[
12571,
12575
],
[
13389,
13393
],
[
13648,
13652
],
[
14156,
14160
]
],
[
[
343,
348
],
[
1679,
1684
],
[
1855,
1860
],
[
2272,
2277
],
[
2735,
2740
],
[
3266,
3271
],
[
3964,
3969
],
[
4028,
4033
],
[
4564,
4569
],
[
5114,
5119
],
[
5486,
5491
],
[
6024,
6029
]
],
[
[
350,
359
],
[
9005,
9014
]
],
[
[
361,
370
],
[
4843,
4852
],
[
9802,
9811
],
[
9961,
9970
]
],
[
[
372,
380
],
[
10549,
10557
],
[
10691,
10699
]
],
[
[
411,
422
],
[
14212,
14223
]
],
[
[
449,
462
],
[
11455,
11468
]
],
[
[
496,
508
],
[
2040,
2052
]
],
[
[
510,
528
],
[
8785,
8803
]
],
[
[
563,
581
],
[
7936,
7954
],
[
11700,
11718
],
[
11753,
11771
],
[
11923,
11941
]
],
[
[
583,
593
],
[
11442,
11452
]
],
[
[
620,
627
],
[
1136,
1143
],
[
6315,
6322
]
],
[
[
655,
664
],
[
4644,
4653
],
[
5573,
5582
]
],
[
[
704,
713
],
[
6063,
6072
],
[
13716,
13725
],
[
14650,
14659
]
],
[
[
740,
746
],
[
10089,
10095
],
[
10928,
10934
]
],
[
[
749,
752
],
[
1901,
1904
],
[
2374,
2377
],
[
2584,
2587
],
[
2937,
2940
],
[
3606,
3609
],
[
4064,
4067
],
[
4401,
4404
],
[
4941,
4944
],
[
5323,
5326
],
[
5900,
5903
]
],
[
[
804,
808
],
[
1735,
1739
],
[
2421,
2425
],
[
2786,
2790
],
[
3438,
3442
],
[
4239,
4243
],
[
4761,
4765
],
[
5166,
5170
],
[
5743,
5747
],
[
6220,
6224
],
[
6571,
6575
],
[
9102,
9106
],
[
10027,
10031
],
[
10873,
10877
],
[
12344,
12348
],
[
12811,
12815
],
[
13918,
13922
],
[
14257,
14261
],
[
14741,
14745
]
],
[
[
841,
857
],
[
1414,
1430
]
],
[
[
1007,
1029
],
[
13259,
13281
]
],
[
[
1205,
1222
],
[
1474,
1491
]
],
[
[
1319,
1332
],
[
2559,
2572
],
[
2912,
2925
],
[
3581,
3594
],
[
13193,
13206
],
[
14058,
14071
]
],
[
[
1520,
1534
],
[
4374,
4388
],
[
5296,
5310
],
[
5873,
5887
]
],
[
[
1795,
1816
]
],
[
[
2499,
2530
]
],
[
[
2864,
2883
]
],
[
[
3516,
3552
]
],
[
[
4319,
4343
]
],
[
[
4780,
4807
]
],
[
[
5246,
5265
]
],
[
[
5823,
5842
]
],
[
[
6271,
6285
]
],
[
[
6734,
6754
]
],
[
[
9121,
9142
]
],
[
[
10046,
10065
]
],
[
[
10892,
10913
]
],
[
[
12386,
12397
]
],
[
[
13085,
13097
]
],
[
[
14011,
14034
]
],
[
[
14458,
14462
]
]
] |
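A minimal sketch of exercising one of the Click commands above in-process via Click's test runner; it still needs the Django settings and database that rssant_common.django_setup configures, so treat it as illustration only.
from click.testing import CliRunner

runner = CliRunner()
# Invoke the command object directly so the registered command name does not matter.
result = runner.invoke(fix_feed_total_storys, ['--dry-run'])
print(result.exit_code)
print(result.output)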
#!/usr/bin/env python
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
header = self.request.headers.get('X-AppEngine-Cron', None)
if not header:
raise ValueError('attempt to access cron handler directly, '
'missing custom App Engine header')
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
header = self.request.headers.get('X-AppEngine-QueueName', None)
if not header:
raise ValueError('attempt to access task handler directly, '
'missing custom App Engine header')
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created a following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler)
], debug=True)
| [
[
[
30,
37
],
[
189,
196
],
[
624,
631
],
[
1387,
1394
]
],
[
[
71,
83
],
[
1045,
1057
]
],
[
[
117,
121
],
[
968,
972
]
],
[
[
145,
158
],
[
514,
527
]
],
[
[
166,
188
],
[
1445,
1467
]
],
[
[
595,
623
],
[
1509,
1537
]
],
[
[
1381,
1384
]
]
] |
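A hedged sketch of poking the cron handler above in-process with webapp2's request helper; the X-AppEngine-Cron header imitates what App Engine's cron service sends, and the App Engine services behind ConferenceApi._cacheAnnouncement are assumed to be stubbed (e.g. via testbed).
response = app.get_response('/crons/set_announcement',
                            headers=[('X-AppEngine-Cron', 'true')])
print(response.status_int)  # 204 once the announcement has been cached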
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def pad_to_shape(this, shp):
"""
Not a very safe function.
"""
return F.pad(this, (0, shp[3] - this.shape[3], 0, shp[2] - this.shape[2]))
class First(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, dropout=False):
super(First, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class Encoder(nn.Module):
def __init__(
self, in_channels, middle_channels, out_channels,
dropout=False, downsample_kernel=2
):
super(Encoder, self).__init__()
layers = [
nn.MaxPool2d(kernel_size=downsample_kernel),
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.encoder = nn.Sequential(*layers)
def forward(self, x):
return self.encoder(x)
class Center(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
super(Center, self).__init__()
layers = [
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.center = nn.Sequential(*layers)
def forward(self, x):
return self.center(x)
class Decoder(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
super(Decoder, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.decoder = nn.Sequential(*layers)
def forward(self, x):
return self.decoder(x)
class Last(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, softmax=False):
super(Last, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=1),
nn.Sigmoid()
]
if softmax:
layers.append(nn.Softmax2d())
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class UNet(nn.Module):
def __init__(self, in_channels, out_channels, softmax=False):
super(UNet, self).__init__()
self.first = First(in_channels, 64, 64)
self.encoder_1 = Encoder(64, 128, 128)
self.encoder_2 = Encoder(128, 256, 256)
self.encoder_3 = Encoder(256, 512, 512)
self.center = Center(512, 1024, 1024, 512)
self.decoder_3 = Decoder(1024, 512, 512, 256)
self.decoder_2 = Decoder(512, 256, 256, 128)
self.decoder_1 = Decoder(256, 128, 128, 64)
self.last = Last(128, 64, out_channels, softmax=softmax)
def forward(self, x):
x_first = self.first(x)
x_enc_1 = self.encoder_1(x_first)
x_enc_2 = self.encoder_2(x_enc_1)
x_enc_3 = self.encoder_3(x_enc_2)
x_cent = self.center(x_enc_3)
x_dec_3 = self.decoder_3(torch.cat([pad_to_shape(x_cent, x_enc_3.shape), x_enc_3], dim=1))
x_dec_2 = self.decoder_2(torch.cat([pad_to_shape(x_dec_3, x_enc_2.shape), x_enc_2], dim=1))
x_dec_1 = self.decoder_1(torch.cat([pad_to_shape(x_dec_2, x_enc_1.shape), x_enc_1], dim=1))
return self.last(torch.cat([pad_to_shape(x_dec_1, x_first.shape), x_first], dim=1))
if __name__ == '__main__':
pass
| [
[
[
7,
12
],
[
5346,
5351
],
[
5446,
5451
],
[
5547,
5552
],
[
5640,
5645
]
],
[
[
21,
35
],
[
286,
288
],
[
1048,
1050
],
[
1927,
1929
],
[
2840,
2842
],
[
3711,
3713
],
[
4490,
4492
],
[
456,
458
],
[
536,
538
],
[
582,
584
],
[
618,
620
],
[
699,
701
],
[
742,
744
],
[
898,
900
],
[
947,
949
],
[
1274,
1276
],
[
1332,
1334
],
[
1412,
1414
],
[
1458,
1460
],
[
1494,
1496
],
[
1575,
1577
],
[
1618,
1620
],
[
1774,
1776
],
[
1825,
1827
],
[
2115,
2117
],
[
2157,
2159
],
[
2237,
2239
],
[
2283,
2285
],
[
2319,
2321
],
[
2400,
2402
],
[
2443,
2445
],
[
2479,
2481
],
[
2688,
2690
],
[
2738,
2740
],
[
3029,
3031
],
[
3109,
3111
],
[
3155,
3157
],
[
3191,
3193
],
[
3272,
3274
],
[
3315,
3317
],
[
3351,
3353
],
[
3560,
3562
],
[
3611,
3613
],
[
3880,
3882
],
[
3960,
3962
],
[
4006,
4008
],
[
4042,
4044
],
[
4126,
4128
],
[
4172,
4174
],
[
4208,
4210
],
[
4278,
4280
],
[
4352,
4354
],
[
4392,
4394
]
],
[
[
44,
68
],
[
201,
202
]
],
[
[
97,
105
]
],
[
[
115,
127
],
[
5357,
5369
],
[
5457,
5469
],
[
5558,
5570
],
[
5651,
5663
]
],
[
[
280,
285
],
[
397,
402
],
[
4629,
4634
]
],
[
[
1040,
1047
],
[
1213,
1220
],
[
4682,
4689
],
[
4730,
4737
],
[
4779,
4786
]
],
[
[
1920,
1926
],
[
2055,
2061
],
[
4825,
4831
]
],
[
[
2832,
2839
],
[
2968,
2975
],
[
4880,
4887
],
[
4935,
4942
],
[
4989,
4996
]
],
[
[
3706,
3710
],
[
3822,
3826
],
[
5037,
5041
]
],
[
[
4485,
4489
],
[
4584,
4588
]
]
] |
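A minimal smoke test, written as if appended to the module above (it relies on the torch import and UNet class defined there); pad_to_shape absorbs odd spatial sizes, so 128x128 is just a convenient input size.
net = UNet(in_channels=3, out_channels=2)
x = torch.randn(1, 3, 128, 128)        # one 3-channel image
with torch.no_grad():
    y = net(x)
print(y.shape)                          # torch.Size([1, 2, 128, 128])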
"""Run decoding analyses in sensors space accross memory content and
visual perception for the working memory task and save decoding performance"""
# Authors: Romain Quentin <rom.quentin@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
import mne
from h5io import read_hdf5
from mne.decoding import GeneralizingEstimator, LinearModel
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from jr.gat import (AngularRegression, scorer_spearman,
scorer_angle)
from base import (complete_behavior, get_events_interactions)
from config import path_data
import sys
subject = sys.argv[1]  # read a swarm file for parallel computing on biowulf
output_folder = '/sensors_accross_epochs_and_conditions/'
# Create result folder
results_folder = op.join(path_data + 'results/' + subject + output_folder)
if not os.path.exists(results_folder):
os.makedirs(results_folder)
# read behavior
fname = op.join(path_data, subject, 'behavior_Target.hdf5')
events = read_hdf5(fname)
events = complete_behavior(events)
events = get_events_interactions(events)
# read stimulus epochs
fname = op.join(path_data, subject, 'epochs_Target.fif')
epochs_target = mne.read_epochs(fname)
epochs_target.pick_types(meg=True, ref_meg=False)
epochs_target.crop(-0.2, 0.9)
# read cue epochs
fname = op.join(path_data, subject, 'epochs_Cue.fif')
epochs_cue = mne.read_epochs(fname)
epochs_cue.pick_types(meg=True, ref_meg=False)
epochs_cue.crop(0, 1.5)
# read probe epochs
fname = op.join(path_data, subject, 'epochs_Probe.fif')
epochs_probe = mne.read_epochs(fname)
epochs_probe.pick_types(meg=True, ref_meg=False)
epochs_probe.crop(0, 0.9)
# Concatenate the data of the three epochs
X0 = epochs_target._data
X1 = epochs_cue._data
X2 = epochs_probe._data
X = np.concatenate((X0, X1, X2), axis=2)
# Define pairs of analyses (train on the 2nd and test on the 1st)
paired_analyses = [['target_sfreq_cue_left_sfreq', 'left_sfreq'],
['target_sfreq_cue_right_sfreq', 'right_sfreq'],
['left_sfreq', 'target_sfreq_cue_left_sfreq'],
['right_sfreq', 'target_sfreq_cue_right_sfreq'],
['target_angle_cue_left_angle', 'left_angle'],
['target_angle_cue_right_angle', 'right_angle'],
['left_angle', 'target_angle_cue_left_angle'],
['right_angle', 'target_angle_cue_right_angle']]
# Loop across each pair of analyses
for paired_analysis in paired_analyses:
y_test = np.array(events[paired_analysis[0]])
y_train = np.array(events[paired_analysis[1]])
# Define estimators depending on the analysis
if 'angle' in paired_analysis[0][:14]:
clf = make_pipeline(StandardScaler(),
LinearModel(AngularRegression(Ridge(),
independent=False)))
scorer = scorer_angle
kwargs = dict()
gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
n_jobs=24, **kwargs)
y_test = np.array(y_test, dtype=float)
y_train = np.array(y_train, dtype=float)
elif 'sfreq' in paired_analysis[0][:14]:
clf = make_pipeline(StandardScaler(), LinearModel(Ridge()))
scorer = scorer_spearman
kwargs = dict()
gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
n_jobs=24, **kwargs)
y_test = np.array(y_test, dtype=float)
y_train = np.array(y_train, dtype=float)
# only consider trials with correct fixation
sel = np.where(events['is_eye_fixed'] == 1)[0]
y_train = y_train[sel]
y_test = y_test[sel]
X = np.concatenate((X0, X1, X2), axis=2)
X = X[sel]
    # only consider non-NaN values
    # Run decoding across conditions
cv = StratifiedKFold(7)
scores = list()
scs = list()
if np.isnan(y_train).any():
sel = np.where(~np.isnan(y_train))[0]
for train, test in cv.split(X[sel], y_train[sel]):
gat.fit(X[sel][train], y_train[sel][train])
score = gat.score(X[sel][test], y_test[sel][test])
sc = gat.score(X[sel][test], y_train[sel][test]) # test on same
scores.append(score)
scs.append(sc)
scores = np.mean(scores, axis=0)
scs = np.mean(scs, axis=0)
else:
for train, test in cv.split(X, y_train):
y_te = y_test[test]
X_te = X[test]
y_te = y_te[np.where(~np.isnan(y_te))[0]]
X_te = X_te[np.where(~np.isnan(y_te))[0]]
y_tr = y_train[train]
X_tr = X[train]
y_tr = y_tr[np.where(~np.isnan(y_tr))[0]]
X_tr = X_tr[np.where(~np.isnan(y_tr))[0]]
y_tr_te = y_train[test]
X_tr_te = X[test]
y_tr_te = y_tr_te[np.where(~np.isnan(y_tr_te))[0]]
X_tr_te = X_tr_te[np.where(~np.isnan(y_tr_te))[0]]
gat.fit(X_tr, y_tr)
score = gat.score(X_te, y_te)
sc = gat.score(X_tr_te, y_tr_te) # test on same
scores.append(score)
scs.append(sc)
scores = np.mean(scores, axis=0)
scs = np.mean(scs, axis=0)
# save cross-validated scores
fname = results_folder +\
'%s_scores_%s_cross_%s.npy' % (subject,
paired_analysis[0],
paired_analysis[1])
    np.save(fname, np.array(scores))  # save across-condition scores
fname = results_folder +\
'%s_scores_%s.npy' % (subject, paired_analysis[1])
np.save(fname, np.array(scs)) # save scores test/train on same condition
| [
[
[
287,
289
],
[
1084,
1086
],
[
1120,
1122
]
],
[
[
297,
310
],
[
1019,
1021
],
[
1173,
1175
],
[
1358,
1360
],
[
1552,
1554
],
[
1733,
1735
]
],
[
[
318,
329
],
[
2012,
2014
],
[
2741,
2743
],
[
2792,
2794
],
[
3312,
3314
],
[
3360,
3362
],
[
3705,
3707
],
[
3753,
3755
],
[
3843,
3845
],
[
3944,
3946
],
[
4140,
4142
],
[
4179,
4181
],
[
4189,
4191
],
[
4543,
4545
],
[
4581,
4583
],
[
4744,
4746
],
[
4754,
4756
],
[
4798,
4800
],
[
4808,
4810
],
[
4914,
4916
],
[
4924,
4926
],
[
4968,
4970
],
[
4978,
4980
],
[
5094,
5096
],
[
5104,
5106
],
[
5157,
5159
],
[
5167,
5169
],
[
5403,
5405
],
[
5441,
5443
],
[
5697,
5699
],
[
5712,
5714
],
[
5856,
5858
],
[
5871,
5873
]
],
[
[
337,
340
],
[
1423,
1426
],
[
1611,
1614
],
[
1796,
1799
]
],
[
[
358,
367
],
[
1234,
1243
]
],
[
[
393,
414
],
[
3182,
3203
],
[
3575,
3596
]
],
[
[
416,
427
],
[
2996,
3007
],
[
3482,
3493
]
],
[
[
457,
470
],
[
2936,
2949
],
[
3450,
3463
]
],
[
[
505,
519
],
[
2950,
2964
],
[
3464,
3478
]
],
[
[
553,
558
],
[
3026,
3031
],
[
3494,
3499
]
],
[
[
587,
598
],
[
3217,
3228
],
[
3610,
3621
]
],
[
[
635,
650
],
[
4077,
4092
]
],
[
[
671,
688
],
[
3008,
3025
]
],
[
[
690,
705
],
[
3521,
3536
]
],
[
[
727,
739
],
[
3131,
3143
]
],
[
[
759,
776
],
[
1260,
1277
]
],
[
[
778,
801
],
[
1295,
1318
]
],
[
[
822,
831
],
[
1027,
1036
],
[
1181,
1190
],
[
1366,
1375
],
[
1560,
1569
],
[
1741,
1750
]
],
[
[
839,
842
],
[
853,
856
]
],
[
[
843,
850
],
[
1052,
1059
],
[
1192,
1199
],
[
1377,
1384
],
[
1571,
1578
],
[
1752,
1759
],
[
5566,
5573
],
[
5823,
5830
]
],
[
[
921,
934
],
[
1062,
1075
]
],
[
[
1002,
1016
],
[
1099,
1113
],
[
1132,
1146
],
[
5509,
5523
],
[
5775,
5789
]
],
[
[
1165,
1170
],
[
1244,
1249
]
],
[
[
1225,
1231
],
[
1278,
1284
]
],
[
[
1251,
1257
],
[
1319,
1325
]
],
[
[
1286,
1292
],
[
2750,
2756
],
[
2801,
2807
],
[
3852,
3858
]
],
[
[
1350,
1355
],
[
1439,
1444
]
],
[
[
1407,
1420
],
[
1446,
1459
],
[
1496,
1509
],
[
1942,
1955
]
],
[
[
1544,
1549
],
[
1627,
1632
]
],
[
[
1598,
1608
],
[
1634,
1644
],
[
1681,
1691
],
[
1967,
1977
]
],
[
[
1725,
1730
],
[
1812,
1817
]
],
[
[
1781,
1793
],
[
1819,
1831
],
[
1868,
1880
],
[
1989,
2001
]
],
[
[
1937,
1939
],
[
2028,
2030
],
[
3960,
3962
]
],
[
[
1962,
1964
],
[
2032,
2034
],
[
3964,
3966
]
],
[
[
1984,
1986
],
[
2036,
2038
],
[
3968,
3970
]
],
[
[
2008,
2009
]
],
[
[
2116,
2131
],
[
2711,
2726
]
],
[
[
2692,
2707
],
[
2757,
2772
],
[
2808,
2823
],
[
2897,
2912
],
[
3411,
3426
],
[
5614,
5629
],
[
5673,
5688
],
[
5832,
5847
]
],
[
[
2732,
2738
],
[
3321,
3327
],
[
3714,
3720
],
[
3924,
3930
]
],
[
[
2782,
2789
],
[
3369,
3376
],
[
3762,
3769
],
[
3898,
3905
]
],
[
[
2930,
2933
],
[
3204,
3207
]
],
[
[
3122,
3128
],
[
3229,
3235
]
],
[
[
3152,
3158
],
[
3287,
3293
]
],
[
[
3176,
3179
],
[
4282,
4285
],
[
4346,
4349
],
[
4406,
4409
],
[
5202,
5205
],
[
5242,
5245
],
[
5281,
5284
]
],
[
[
3303,
3309
],
[
3924,
3930
]
],
[
[
3350,
3357
],
[
3898,
3905
]
],
[
[
3444,
3447
],
[
3597,
3600
]
],
[
[
3512,
3518
],
[
3622,
3628
]
],
[
[
3545,
3551
],
[
3680,
3686
]
],
[
[
3569,
3572
],
[
4282,
4285
],
[
4346,
4349
],
[
4406,
4409
],
[
5202,
5205
],
[
5242,
5245
],
[
5281,
5284
]
],
[
[
3696,
3702
],
[
3924,
3930
]
],
[
[
3743,
3750
],
[
3898,
3905
]
],
[
[
3837,
3840
],
[
3906,
3909
],
[
3931,
3934
],
[
3991,
3994
]
],
[
[
3888,
3895
],
[
4149,
4156
],
[
4198,
4205
],
[
4255,
4262
],
[
4305,
4312
],
[
4430,
4437
],
[
4651,
4658
],
[
4847,
4854
],
[
5020,
5027
]
],
[
[
3915,
3921
],
[
4370,
4376
],
[
4680,
4686
]
],
[
[
3940,
3941
],
[
3989,
3990
]
],
[
[
3985,
3986
],
[
4247,
4248
],
[
4290,
4291
],
[
4356,
4357
],
[
4416,
4417
],
[
4648,
4649
],
[
4712,
4713
],
[
4881,
4882
],
[
5056,
5057
]
],
[
[
4072,
4074
],
[
4238,
4240
],
[
4639,
4641
]
],
[
[
4100,
4106
],
[
4478,
4484
],
[
4551,
4557
],
[
5338,
5344
],
[
5411,
5417
]
],
[
[
4120,
4123
],
[
4511,
4514
],
[
4589,
4592
],
[
5371,
5374
],
[
5449,
5452
]
],
[
[
4173,
4176
],
[
4249,
4252
],
[
4263,
4266
],
[
4292,
4295
],
[
4313,
4316
],
[
4358,
4361
],
[
4377,
4380
],
[
4418,
4421
],
[
4438,
4441
]
],
[
[
4223,
4228
],
[
4297,
4302
],
[
4318,
4323
]
],
[
[
4230,
4234
],
[
4363,
4367
],
[
4382,
4386
],
[
4423,
4427
],
[
4443,
4447
]
],
[
[
4338,
4343
],
[
4492,
4497
]
],
[
[
4401,
4403
],
[
4522,
4524
]
],
[
[
4534,
4540
],
[
5721,
5727
]
],
[
[
4575,
4578
],
[
5880,
5883
]
],
[
[
4624,
4629
],
[
4855,
4860
],
[
4883,
4888
]
],
[
[
4631,
4635
],
[
4687,
4691
],
[
4714,
4718
],
[
5028,
5032
],
[
5058,
5062
]
],
[
[
4673,
4677
],
[
4739,
4743
],
[
4763,
4767
]
],
[
[
4705,
4709
],
[
4793,
4797
]
],
[
[
4732,
4736
],
[
4817,
4821
],
[
5258,
5262
]
],
[
[
4786,
4790
],
[
5252,
5256
]
],
[
[
4840,
4844
],
[
4909,
4913
],
[
4933,
4937
]
],
[
[
4874,
4878
],
[
4963,
4967
]
],
[
[
4902,
4906
],
[
4987,
4991
],
[
5216,
5220
]
],
[
[
4956,
4960
],
[
5210,
5214
]
],
[
[
5010,
5017
],
[
5086,
5093
],
[
5113,
5120
]
],
[
[
5046,
5053
],
[
5149,
5156
]
],
[
[
5076,
5083
],
[
5176,
5183
],
[
5300,
5307
]
],
[
[
5139,
5146
],
[
5291,
5298
]
],
[
[
5234,
5239
],
[
5352,
5357
]
],
[
[
5276,
5278
],
[
5382,
5384
]
],
[
[
5394,
5400
],
[
5721,
5727
]
],
[
[
5435,
5438
],
[
5880,
5883
]
],
[
[
5501,
5506
],
[
5705,
5710
]
],
[
[
5767,
5772
],
[
5864,
5869
]
]
] |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import boto3
import os
import logging
import platform
import pytest
import shutil
import sys
import tempfile
from sagemaker import LocalSession, Session
from sagemaker.pytorch import PyTorch
from .utils import image_utils
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('factory.py').setLevel(logging.INFO)
logging.getLogger('auth.py').setLevel(logging.INFO)
logging.getLogger('connectionpool.py').setLevel(logging.INFO)
dir_path = os.path.dirname(os.path.realpath(__file__))
NO_P2_REGIONS = ['ap-east-1', 'ap-northeast-3', 'ap-southeast-2', 'ca-central-1', 'eu-central-1', 'eu-north-1',
'eu-west-2', 'eu-west-3', 'us-west-1', 'sa-east-1', 'me-south-1']
NO_P3_REGIONS = ['ap-east-1', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
'eu-central-1', 'eu-north-1', 'eu-west-2', 'eu-west-3', 'sa-east-1', 'us-west-1', 'me-south-1']
def pytest_addoption(parser):
parser.addoption('--build-image', '-D', action='store_true')
parser.addoption('--build-base-image', '-B', action='store_true')
parser.addoption('--aws-id')
parser.addoption('--instance-type')
parser.addoption('--accelerator-type', default=None)
parser.addoption('--docker-base-name', default='pytorch')
parser.addoption('--region', default='us-west-2')
parser.addoption('--framework-version', default=PyTorch.LATEST_VERSION)
parser.addoption('--py-version', choices=['2', '3'], default=str(sys.version_info.major))
# Processor is still "cpu" for EIA tests
parser.addoption('--processor', choices=['gpu', 'cpu', 'eia'], default='cpu')
# If not specified, will default to {framework-version}-{processor}-py{py-version}
parser.addoption('--tag', default=None)
parser.addoption('--generate-coverage-doc', default=False, action='store_true',
help='use this option to generate test coverage doc')
def pytest_collection_modifyitems(session, config, items):
if config.getoption("--generate-coverage-doc"):
from test.test_utils.test_reporting import TestReportGenerator
report_generator = TestReportGenerator(items, is_sagemaker=True)
report_generator.generate_coverage_doc(framework="pytorch", job_type="inference")
@pytest.fixture(scope='session', name='docker_base_name')
def fixture_docker_base_name(request):
return request.config.getoption('--docker-base-name')
@pytest.fixture(scope='session', name='region')
def fixture_region(request):
return request.config.getoption('--region')
@pytest.fixture(scope='session', name='framework_version')
def fixture_framework_version(request):
return request.config.getoption('--framework-version')
@pytest.fixture(scope='session', name='py_version')
def fixture_py_version(request):
return 'py{}'.format(int(request.config.getoption('--py-version')))
@pytest.fixture(scope='session', name='processor')
def fixture_processor(request):
return request.config.getoption('--processor')
@pytest.fixture(scope='session', name='tag')
def fixture_tag(request, framework_version, processor, py_version):
provided_tag = request.config.getoption('--tag')
default_tag = '{}-{}-{}'.format(framework_version, processor, py_version)
return provided_tag if provided_tag else default_tag
@pytest.fixture(scope='session', name='docker_image')
def fixture_docker_image(docker_base_name, tag):
return '{}:{}'.format(docker_base_name, tag)
@pytest.fixture
def opt_ml():
tmp = tempfile.mkdtemp()
os.mkdir(os.path.join(tmp, 'output'))
    # Docker cannot mount the macOS /var folder properly; see
# https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600
opt_ml_dir = '/private{}'.format(tmp) if platform.system() == 'Darwin' else tmp
yield opt_ml_dir
shutil.rmtree(tmp, True)
@pytest.fixture(scope='session', name='use_gpu')
def fixture_use_gpu(processor):
return processor == 'gpu'
@pytest.fixture(scope='session', name='build_base_image', autouse=True)
def fixture_build_base_image(request, framework_version, py_version, processor, tag, docker_base_name):
build_base_image = request.config.getoption('--build-base-image')
if build_base_image:
return image_utils.build_base_image(framework_name=docker_base_name,
framework_version=framework_version,
py_version=py_version,
base_image_tag=tag,
processor=processor,
cwd=os.path.join(dir_path, '..'))
return tag
@pytest.fixture(scope='session', name='sagemaker_session')
def fixture_sagemaker_session(region):
return Session(boto_session=boto3.Session(region_name=region))
@pytest.fixture(scope='session', name='sagemaker_local_session')
def fixture_sagemaker_local_session(region):
return LocalSession(boto_session=boto3.Session(region_name=region))
@pytest.fixture(name='aws_id', scope='session')
def fixture_aws_id(request):
return request.config.getoption('--aws-id')
@pytest.fixture(name='instance_type', scope='session')
def fixture_instance_type(request, processor):
provided_instance_type = request.config.getoption('--instance-type')
default_instance_type = 'local' if processor == 'cpu' else 'local_gpu'
return provided_instance_type or default_instance_type
@pytest.fixture(name='accelerator_type', scope='session')
def fixture_accelerator_type(request):
return request.config.getoption('--accelerator-type')
@pytest.fixture(name='docker_registry', scope='session')
def fixture_docker_registry(aws_id, region):
return '{}.dkr.ecr.{}.amazonaws.com'.format(aws_id, region)
@pytest.fixture(name='ecr_image', scope='session')
def fixture_ecr_image(docker_registry, docker_base_name, tag):
return '{}/{}:{}'.format(docker_registry, docker_base_name, tag)
@pytest.fixture(autouse=True)
def skip_by_device_type(request, use_gpu, instance_type, accelerator_type):
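    # SageMaker instance types look like 'ml.p2.xlarge', so the character at index 3
    # is the instance family letter; the 'g' and 'p' families are the GPU-backed ones.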
is_gpu = use_gpu or instance_type[3] in ['g', 'p']
is_eia = accelerator_type is not None
# Separate out cases for clearer logic.
# When running GPU test, skip CPU test. When running CPU test, skip GPU test.
if (request.node.get_closest_marker('gpu_test') and not is_gpu) or \
(request.node.get_closest_marker('cpu_test') and is_gpu):
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
# When running EIA test, skip the CPU and GPU functions
elif (request.node.get_closest_marker('gpu_test') or request.node.get_closest_marker('cpu_test')) and is_eia:
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
# When running CPU or GPU test, skip EIA test.
elif request.node.get_closest_marker('eia_test') and not is_eia:
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
@pytest.fixture(autouse=True)
def skip_by_py_version(request, py_version):
if request.node.get_closest_marker('skip_py2') and py_version != 'py3':
pytest.skip('Skipping the test because Python 2 is not supported.')
@pytest.fixture(autouse=True)
def skip_gpu_instance_restricted_regions(region, instance_type):
if (region in NO_P2_REGIONS and instance_type.startswith('ml.p2')) \
or (region in NO_P3_REGIONS and instance_type.startswith('ml.p3')):
pytest.skip('Skipping GPU test in region {}'.format(region))
@pytest.fixture(autouse=True)
def skip_gpu_py2(request, use_gpu, instance_type, py_version, framework_version):
is_gpu = use_gpu or instance_type[3] in ['g', 'p']
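    # Tests marked 'skip_gpu_py2' are skipped on GPU with Python 2 and framework 1.4.0,
    # the combination affected by the mms issue referenced in the skip message below.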
if request.node.get_closest_marker('skip_gpu_py2') and is_gpu and py_version != 'py3' \
and framework_version == '1.4.0':
pytest.skip('Skipping the test until mms issue resolved.')
| [
[
[
593,
608
]
],
[
[
617,
622
],
[
5552,
5557
],
[
5736,
5741
]
],
[
[
630,
632
],
[
1206,
1208
],
[
1222,
1224
],
[
4281,
4283
],
[
4290,
4292
],
[
5374,
5376
]
],
[
[
640,
647
],
[
844,
851
],
[
872,
879
],
[
907,
914
],
[
921,
928
],
[
957,
964
],
[
971,
978
],
[
1010,
1017
],
[
1024,
1031
],
[
1065,
1072
],
[
1079,
1086
],
[
1117,
1124
],
[
1131,
1138
],
[
1179,
1186
]
],
[
[
655,
663
],
[
4496,
4504
]
],
[
[
671,
677
],
[
3023,
3029
],
[
3180,
3186
],
[
3307,
3313
],
[
3467,
3473
],
[
3626,
3632
],
[
3762,
3768
],
[
4065,
4071
],
[
4219,
4225
],
[
4589,
4595
],
[
4702,
4708
],
[
5423,
5429
],
[
5590,
5596
],
[
5774,
5780
],
[
5901,
5907
],
[
6212,
6218
],
[
6369,
6375
],
[
6537,
6543
],
[
6722,
6728
],
[
7760,
7766
],
[
7989,
7995
],
[
8303,
8309
],
[
7202,
7208
],
[
7466,
7472
],
[
7676,
7682
],
[
7918,
7924
],
[
8239,
8245
],
[
8615,
8621
]
],
[
[
685,
691
],
[
4561,
4567
]
],
[
[
699,
702
],
[
2231,
2234
]
],
[
[
710,
718
],
[
4258,
4266
]
],
[
[
742,
754
],
[
5710,
5722
]
],
[
[
756,
763
],
[
5531,
5538
]
],
[
[
794,
801
],
[
2138,
2145
]
],
[
[
822,
833
],
[
4987,
4998
]
],
[
[
835,
841
]
],
[
[
1195,
1203
],
[
5387,
5395
]
],
[
[
1251,
1264
],
[
8101,
8114
]
],
[
[
1446,
1459
],
[
8177,
8190
]
],
[
[
1679,
1695
]
],
[
[
2679,
2708
]
],
[
[
3084,
3108
]
],
[
[
3231,
3245
]
],
[
[
3369,
3394
]
],
[
[
3522,
3540
]
],
[
[
3680,
3697
]
],
[
[
3810,
3821
]
],
[
[
4122,
4142
]
],
[
[
4238,
4244
]
],
[
[
4641,
4656
]
],
[
[
4777,
4801
]
],
[
[
5485,
5510
]
],
[
[
5658,
5689
]
],
[
[
5825,
5839
]
],
[
[
5959,
5980
]
],
[
[
6273,
6297
]
],
[
[
6429,
6452
]
],
[
[
6591,
6608
]
],
[
[
6755,
6774
]
],
[
[
7793,
7811
]
],
[
[
8022,
8058
]
],
[
[
8336,
8348
]
]
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
__author__ = "Aurélien Massiot"
__credits__ = "https://github.com/octo-technology/bdacore"
__license__ = "Apache 2.0"
def plot_confusion_matrix(confusion_matrix, classes_list, normalize=True, figsize=(10, 7), fontsize=14, cmap="Blues"):
"""
Display a pretty confusion matrix.
Parameters
----------
confusion_matrix : array-like
    classes_list : list,
        class labels of the confusion matrix
    normalize : boolean,
        whether to normalize the confusion matrix row-wise
figsize : tuple, optional (default=(10,7))
set the figure size
fontsize : int, optional (default=14)
set the font size
cmap : str, optional (default="Blues")
set the colormap
Returns
-------
Confusion matrix figure
Examples
--------
>>> from dsbox.ml.visualization.metrics import plot_confusion_matrix
>>> array = [[ 8458, 227, 1730], \
[ 1073, 37590, 1613], \
[ 2390, 1159, 17540]]
>>> classes_list = ["A", "B", "C"]
>>> plot_confusion_matrix(array, classes_list)
"""
confusion_matrix = np.array(confusion_matrix)
fig, ax = plt.subplots(figsize=figsize)
    if normalize:
        normalized_cm = confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
        df_cm = pd.DataFrame(normalized_cm, index=classes_list, columns=classes_list)
    else:
        df_cm = pd.DataFrame(confusion_matrix, index=classes_list, columns=classes_list)
    plt.matshow(df_cm, fignum=0, cmap=cmap)
ax.set_xticks(np.arange(len(classes_list)))
ax.set_yticks(np.arange(len(classes_list)))
ax.set_xticklabels(classes_list)
ax.set_yticklabels(classes_list)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
for i in range(len(classes_list)):
for j in range(len(classes_list)):
ax.text(j, i, confusion_matrix[i, j], ha="center", va="center", color="grey", fontsize=fontsize)
plt.ylabel('True labels')
plt.xlabel('Predicted labels')
plt.show()
def plot_roc_curve(y_test, y_pred_probas, proba_step=None):
"""
Plot ROC curve with probabilities thresholds.
Parameters
----------
y_test : array-like
true labels
y_pred_probas : array-like
predicted labels
proba_step : int (optional) (default=None)
        if set, annotate every `proba_step`-th point of the curve with its probability threshold. If None, nothing is displayed.
Examples
--------
>>> from dsbox.ml.visualization.metrics import plot_roc_curve
>>> from sklearn import datasets
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.ensemble import RandomForestClassifier
>>> X, y = datasets.make_moons(noise=0.3, random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0)
>>> clf = RandomForestClassifier(n_estimators=10, random_state=42)
>>> _ = clf.fit(X_train, y_train)
>>> y_pred_probas = clf.predict_proba(X_test)
>>> plot_roc_curve(y_test, y_pred_probas, proba_step=2)
"""
fpr, tpr, thresholds = roc_curve(y_test, y_pred_probas[:, 1])
auc_score = auc(fpr, tpr)
plt.figure()
lw = 1
plt.plot(fpr, tpr, color='darkorange', lw=lw, marker='.')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
if proba_step is not None:
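        # Annotate every `proba_step`-th point of the curve with its decision threshold,
        # nudged slightly below the marker so the label does not overlap the line.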
i = 0
for x, y, txt in zip(fpr, tpr, thresholds):
if i % proba_step == 0:
plt.annotate(np.round(txt, 2), (x, y - 0.04), color='darkgray', fontsize=8)
i += 1
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic (ROC) - AUC score: {}'.format(np.round(auc_score, 3)))
plt.show()
| [
[
[
7,
31
],
[
1245,
1248
],
[
1603,
1606
],
[
1773,
1776
],
[
1987,
1990
],
[
2264,
2267
],
[
2294,
2297
],
[
2329,
2332
],
[
3501,
3504
],
[
3529,
3532
],
[
3591,
3594
],
[
3802,
3805
],
[
3901,
3904
],
[
3926,
3929
],
[
3952,
3955
],
[
3990,
3993
],
[
4027,
4030
],
[
4135,
4138
]
],
[
[
39,
50
],
[
1203,
1205
],
[
1318,
1320
],
[
1363,
1365
],
[
1474,
1476
],
[
1831,
1833
],
[
1879,
1881
],
[
3815,
3817
],
[
4106,
4108
]
],
[
[
58,
70
],
[
1502,
1504
],
[
1669,
1671
]
],
[
[
100,
109
],
[
3427,
3436
]
],
[
[
111,
114
],
[
3482,
3485
]
],
[
[
116,
126
]
],
[
[
148,
159
]
],
[
[
207,
218
]
],
[
[
240,
261
]
],
[
[
2346,
2360
]
]
] |
urlpatterns = []
handler404 = "csrf_tests.views.csrf_token_error_handler"
| [
[
[
0,
11
]
],
[
[
18,
28
]
]
] |
"""
Sust Global Climate Explorer API
This API provides programmatic access to physical risk exposure data. For more guidance on using this API, please visit the Sust Global Dev Center: https://developers.sustglobal.com. # noqa: E501
The version of the OpenAPI document: beta
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import sust.api.climate_explorer.clientgen
from sust.api.climate_explorer.clientgen.model.physical_risk_summary_indicators import PhysicalRiskSummaryIndicators
class TestPhysicalRiskSummaryIndicators(unittest.TestCase):
"""PhysicalRiskSummaryIndicators unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPhysicalRiskSummaryIndicators(self):
"""Test PhysicalRiskSummaryIndicators"""
# FIXME: construct object with mandatory attributes with example values
# model = PhysicalRiskSummaryIndicators() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
[
[
354,
357
]
],
[
[
365,
373
],
[
577,
585
],
[
1015,
1023
]
],
[
[
382,
417
]
],
[
[
505,
534
]
],
[
[
543,
576
]
]
] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo_config import cfg
from sysinv.openstack.common import context
from sysinv.openstack.common import log
from sysinv.openstack.common import periodic_task
from sysinv.openstack.common import rpc
from sysinv.openstack.common.rpc import service as rpc_service
from oslo_service import service
cfg.CONF.register_opts([
cfg.IntOpt('periodic_interval',
default=60,
help='seconds between running periodic tasks'),
cfg.StrOpt('host',
default=socket.getfqdn(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
])
CONF = cfg.CONF
class PeriodicService(rpc_service.Service, periodic_task.PeriodicTasks):
def start(self):
super(PeriodicService, self).start()
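        # Run the manager's periodic_tasks callback on a timer, every
        # CONF.periodic_interval seconds, under an elevated admin context.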
admin_context = context.RequestContext('admin', 'admin', is_admin=True)
self.tg.add_timer(cfg.CONF.periodic_interval,
self.manager.periodic_tasks,
context=admin_context)
def prepare_service(argv=None):
if argv is None:
argv = []
rpc.set_defaults(control_exchange='sysinv')
cfg.set_defaults(log.log_opts,
default_log_levels=['amqplib=WARN',
'qpid.messaging=INFO',
'sqlalchemy=WARN',
'keystoneclient=INFO',
'stevedore=INFO',
'eventlet.wsgi.server=WARN'
])
cfg.CONF(argv[1:], project='sysinv')
log.setup('sysinv')
def process_launcher():
return service.ProcessLauncher(CONF)
| [
[
[
705,
711
],
[
1211,
1217
]
],
[
[
737,
740
],
[
1014,
1017
],
[
1043,
1046
],
[
1169,
1172
],
[
1562,
1565
],
[
1819,
1822
],
[
2076,
2079
],
[
2528,
2531
]
],
[
[
778,
785
],
[
1737,
1744
]
],
[
[
822,
825
],
[
2093,
2096
],
[
2569,
2572
]
],
[
[
862,
875
],
[
1616,
1629
]
],
[
[
912,
915
],
[
2028,
2031
]
],
[
[
956,
978
],
[
1595,
1606
]
],
[
[
1004,
1011
],
[
2626,
2633
]
],
[
[
1555,
1559
],
[
2650,
2654
]
],
[
[
1579,
1594
],
[
1682,
1697
]
],
[
[
1957,
1972
]
],
[
[
2595,
2611
]
]
] |
# -*- coding: utf-8 -*-
import re
import unittest
from setuptools import setup
def my_test_suite():
"""From http://stackoverflow.com/questions/17001010/.
"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
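# Package metadata (version, description, author) is scraped out of rebin.py
# with regular expressions rather than imported from the module.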
with open('rebin.py', 'r') as f:
lines = f.read()
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
lines, re.MULTILINE).group(1)
description = re.search(r'^u\"\"\"(.*)',
lines, re.MULTILINE).group(1)
long_description = re.search('^u\"\"\"(.*)^\"\"\"',
lines, re.MULTILINE | re.DOTALL).group(1)
author = re.search(r'^__author__\s*=\s*[\'"]([^\'"]*)[\'"]',
lines, re.MULTILINE).group(1)
print(long_description)
setup(
name='rebin',
version=version,
description=description,
long_description=long_description,
url='https://github.com/sbrisard/rebin',
author=author,
author_email='',
py_modules=['rebin'],
license='BSD-3',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering'],
test_suite='setup.my_test_suite',
install_requires=['numpy'],
)
| [
[
[
31,
33
],
[
370,
372
],
[
454,
456
],
[
495,
497
],
[
557,
559
],
[
603,
605
],
[
676,
678
],
[
691,
693
],
[
724,
726
],
[
806,
808
]
],
[
[
41,
49
],
[
188,
196
]
],
[
[
74,
79
],
[
855,
860
]
],
[
[
86,
99
]
],
[
[
332,
333
],
[
347,
348
]
],
[
[
339,
344
],
[
447,
452
],
[
550,
555
],
[
669,
674
],
[
799,
804
]
],
[
[
360,
367
],
[
892,
899
]
],
[
[
481,
492
],
[
917,
928
]
],
[
[
584,
600
],
[
836,
852
],
[
951,
967
]
],
[
[
715,
721
],
[
1025,
1031
]
]
] |
# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of thermodynamic calculations."""
import warnings
import numpy as np
import scipy.integrate as si
import scipy.optimize as so
from .tools import (_greater_or_close, _less_or_close, _remove_nans, find_bounding_indices,
find_intersections, first_derivative, get_layer)
from .. import constants as mpconsts
from ..cbook import broadcast_indices
from ..interpolate.one_dimension import interpolate_1d
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
sat_pressure_0c = 6.112 * units.millibar
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[temperature]')
def relative_humidity_from_dewpoint(temperature, dewpoint):
r"""Calculate the relative humidity.
Uses temperature and dewpoint in celsius to calculate relative
humidity using the ratio of vapor pressure to saturation vapor pressures.
Parameters
----------
temperature : `pint.Quantity`
air temperature
dewpoint : `pint.Quantity`
dewpoint temperature
Returns
-------
`pint.Quantity`
relative humidity
See Also
--------
saturation_vapor_pressure
"""
e = saturation_vapor_pressure(dewpoint)
e_s = saturation_vapor_pressure(temperature)
return (e / e_s)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]')
def exner_function(pressure, reference_pressure=mpconsts.P0):
r"""Calculate the Exner function.
.. math:: \Pi = \left( \frac{p}{p_0} \right)^\kappa
    This can be used to calculate potential temperature from temperature (and vice versa),
since
.. math:: \Pi = \frac{T}{\theta}
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
reference_pressure : `pint.Quantity`, optional
The reference pressure against which to calculate the Exner function, defaults to
metpy.constants.P0
Returns
-------
`pint.Quantity`
The value of the Exner function at the given pressure
See Also
--------
potential_temperature
temperature_from_potential_temperature
"""
return (pressure / reference_pressure).to('dimensionless')**mpconsts.kappa
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def potential_temperature(pressure, temperature):
r"""Calculate the potential temperature.
    Uses the Poisson equation to calculate the potential temperature
given `pressure` and `temperature`.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
temperature : `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The potential temperature corresponding to the temperature and
pressure.
See Also
--------
dry_lapse
Notes
-----
Formula:
.. math:: \Theta = T (P_0 / P)^\kappa
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.potential_temperature(800. * units.mbar, 273. * units.kelvin)
<Quantity(290.9665329591884, 'kelvin')>
"""
return temperature / exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def temperature_from_potential_temperature(pressure, potential_temperature):
r"""Calculate the temperature from a given potential temperature.
Uses the inverse of the Poisson equation to calculate the temperature from a
given potential temperature at a specific pressure level.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
potential_temperature : `pint.Quantity`
potential temperature
Returns
-------
`pint.Quantity`
The temperature corresponding to the potential temperature and pressure.
See Also
--------
dry_lapse
potential_temperature
Notes
-----
Formula:
.. math:: T = \Theta (P / P_0)^\kappa
Examples
--------
>>> from metpy.units import units
>>> from metpy.calc import temperature_from_potential_temperature
>>> # potential temperature
>>> theta = np.array([ 286.12859679, 288.22362587]) * units.kelvin
>>> p = 850 * units.mbar
>>> T = temperature_from_potential_temperature(p, theta)
"""
return potential_temperature * exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def dry_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming only dry processes.
This function lifts a parcel starting at `temperature`, conserving
potential temperature. The starting pressure can be given by `reference_pressure`.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The resulting parcel temperature at levels given by `pressure`
See Also
--------
moist_lapse : Calculate parcel temperature assuming liquid saturation processes
parcel_profile : Calculate complete parcel profile
potential_temperature
"""
if reference_pressure is None:
reference_pressure = pressure[0]
return temperature * (pressure / reference_pressure)**mpconsts.kappa
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def moist_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `reference_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The temperature corresponding to the starting temperature and
pressure levels.
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
"""
def dt(t, p):
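        # Integrand for the pseudo-adiabatic lapse rate dT/dP from the docstring:
        # odeint supplies bare floats, so re-attach units before evaluating and
        # return a plain magnitude again.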
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)
/ (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon
/ (mpconsts.Rd * t * t)))).to('kelvin')
return (frac / p).magnitude
if reference_pressure is None:
reference_pressure = pressure[0]
pressure = pressure.to('mbar')
reference_pressure = reference_pressure.to('mbar')
temperature = np.atleast_1d(temperature)
side = 'left'
pres_decreasing = (pressure[0] > pressure[-1])
if pres_decreasing:
# Everything is easier if pressures are in increasing order
pressure = pressure[::-1]
side = 'right'
ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
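    # Integrate the ODE away from the reference pressure in each direction and
    # stitch the two temperature traces back together afterwards.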
if reference_pressure > pressure.min():
# Integrate downward in pressure
pres_down = np.append(reference_pressure.m, pressure[(ref_pres_idx - 1)::-1].m)
trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if reference_pressure < pressure.max():
# Integrate upward in pressure
pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)
trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lcl(pressure, temperature, dewpoint, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpoint`,
and `pressure`. If these are arrays, this function will return a LCL
for every index. This function does work with surface grids as a result.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpoint : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The LCL pressure
`pint.Quantity`
The LCL temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dewpoint from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
"""
def _lcl_iter(p, p0, w, t):
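        # One fixed-point step: the dewpoint implied by the conserved mixing ratio w
        # at the guessed pressure p is mapped back to a new pressure estimate via
        # dry-adiabatic (Poisson) scaling from the starting state (p0, t).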
td = globals()['dewpoint'](vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1. / mpconsts.kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpoint), pressure)
lcl_p = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),
xtol=eps, maxiter=max_iters)
# np.isclose needed if surface is LCL due to precision error with np.log in dewpoint.
# Causes issues with parcel_profile_with_lcl if removed. Issue #1187
lcl_p = np.where(np.isclose(lcl_p, pressure.m), pressure.m, lcl_p) * pressure.units
return lcl_p, globals()['dewpoint'](vapor_pressure(lcl_p, w)).to(temperature.units)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def lfc(pressure, temperature, dewpoint, parcel_temperature_profile=None, dewpoint_start=None,
which='top'):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature. If this intersection occurs below the LCL,
the LFC is determined to be the same as the LCL, based upon the conditions
set forth in [USAF1990]_, pg 4-14, where a parcel must be lifted dry adiabatically
to saturation before it can freely rise.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpoint_start: `pint.Quantity`, optional
The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.
'top' returns the lowest-pressure LFC, default.
'bottom' returns the highest-pressure LFC.
'wide' returns the LFC whose corresponding EL is farthest away.
'most_cape' returns the LFC that results in the most CAPE in the profile.
Returns
-------
`pint.Quantity`
The LFC pressure, or array of same if which='all'
`pint.Quantity`
The LFC temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
if dewpoint_start is None:
dewpoint_start = dewpoint[0]
# The parcel profile and data may have the same first data point.
# If that is the case, ignore that point to get the real first
# intersection for the LFC calculation. Use logarithmic interpolation.
if np.isclose(parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m):
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='increasing', log_x=True)
else:
x, y = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing', log_x=True)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpoint_start)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
x, y = np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
el_pres, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
if np.min(el_pres) > this_lcl[0]:
x, y = np.nan * pressure.units, np.nan * temperature.units
else:
x, y = this_lcl
return x, y
# Otherwise, find all LFCs that exist above the LCL
# What is returned depends on which flag as described in the docstring
else:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature,
dewpoint, intersect_type='LFC')
def _multiple_el_lfc_options(intersect_pressures, intersect_temperatures, valid_x,
which, pressure, parcel_temperature_profile, temperature,
dewpoint, intersect_type):
"""Choose which ELs and LFCs to return from a sounding."""
p_list, t_list = intersect_pressures[valid_x], intersect_temperatures[valid_x]
if which == 'all':
x, y = p_list, t_list
elif which == 'bottom':
x, y = p_list[0], t_list[0]
elif which == 'top':
x, y = p_list[-1], t_list[-1]
elif which == 'wide':
x, y = _wide_option(intersect_type, p_list, t_list, pressure,
parcel_temperature_profile, temperature)
elif which == 'most_cape':
x, y = _most_cape_option(intersect_type, p_list, t_list, pressure, temperature,
dewpoint, parcel_temperature_profile)
else:
raise ValueError('Invalid option for "which". Valid options are "top", "bottom", '
'"wide", "most_cape", and "all".')
return x, y
def _wide_option(intersect_type, p_list, t_list, pressure, parcel_temperature_profile,
temperature):
"""Calculate the LFC or EL that produces the greatest distance between these points."""
# zip the LFC and EL lists together and find greatest difference
if intersect_type == 'LFC':
# Find EL intersection pressure values
lfc_p_list = p_list
el_p_list, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
else: # intersect_type == 'EL'
el_p_list = p_list
# Find LFC intersection pressure values
lfc_p_list, _ = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing',
log_x=True)
diff = [lfc_p.m - el_p.m for lfc_p, el_p in zip(lfc_p_list, el_p_list)]
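    # 'wide' picks the intersection whose LFC-EL pressure separation is largest.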
return (p_list[np.where(diff == np.max(diff))][0],
t_list[np.where(diff == np.max(diff))][0])
def _most_cape_option(intersect_type, p_list, t_list, pressure, temperature, dewpoint,
parcel_temperature_profile):
"""Calculate the LFC or EL that produces the most CAPE in the profile."""
# Need to loop through all possible combinations of cape, find greatest cape profile
cape_list, pair_list = [], []
for which_lfc in ['top', 'bottom']:
for which_el in ['top', 'bottom']:
cape, _ = cape_cin(pressure, temperature, dewpoint, parcel_temperature_profile,
which_lfc=which_lfc, which_el=which_el)
cape_list.append(cape.m)
pair_list.append([which_lfc, which_el])
(lfc_chosen, el_chosen) = pair_list[np.where(cape_list == np.max(cape_list))[0][0]]
if intersect_type == 'LFC':
if lfc_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else: # 'bottom' is returned
x, y = p_list[0], t_list[0]
else: # EL is returned
if el_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else:
x, y = p_list[0], t_list[0]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def el(pressure, temperature, dewpoint, parcel_temperature_profile=None, which='top'):
r"""Calculate the equilibrium level.
This works by finding the last intersection of the ideal parcel path and
the measured environmental temperature. If there is one or fewer intersections, there is
no equilibrium level.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure profile
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the EL. Defaults to the
surface parcel profile.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.
'top' returns the lowest-pressure EL, default.
'bottom' returns the highest-pressure EL.
'wide' returns the EL whose corresponding LFC is farthest away.
'most_cape' returns the EL that results in the most CAPE in the profile.
Returns
-------
`pint.Quantity`
The EL pressure, or array of same if which='all'
`pint.Quantity`
The EL temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
# If the top of the sounding parcel is warmer than the environment, there is no EL
if parcel_temperature_profile[-1] > temperature[-1]:
return np.nan * pressure.units, np.nan * temperature.units
# Interpolate in log space to find the appropriate pressure - units have to be stripped
# and reassigned to allow np.log() to function properly.
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:],
direction='decreasing', log_x=True)
lcl_p, _ = lcl(pressure[0], temperature[0], dewpoint[0])
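    # An EL only counts if it sits above the LCL, i.e. at a lower pressure than lcl_p.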
idx = x < lcl_p
if len(x) > 0 and x[-1] < lcl_p:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature, dewpoint,
intersect_type='EL')
else:
return np.nan * pressure.units, np.nan * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
The starting temperature
dewpoint : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The parcel temperatures at the specified pressure levels.
See Also
--------
lcl, moist_lapse, dry_lapse
"""
_, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)
return concatenate((t_l, t_u))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile_with_lcl(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile. This function returns
a profile that includes the LCL.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
The atmospheric temperature at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
dewpoint : `pint.Quantity`
The atmospheric dewpoint at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
Returns
-------
pressure : `pint.Quantity`
The parcel profile pressures, which includes the specified levels and the LCL
ambient_temperature : `pint.Quantity`
The atmospheric temperature values, including the value interpolated to the LCL level
ambient_dew_point : `pint.Quantity`
The atmospheric dewpoint values, including the value interpolated to the LCL level
profile_temperature : `pint.Quantity`
The parcel profile temperatures at all of the levels in the returned pressures array,
including the LCL.
See Also
--------
lcl, moist_lapse, dry_lapse, parcel_profile
"""
p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[0],
dewpoint[0])
new_press = concatenate((p_l, p_lcl, p_u))
prof_temp = concatenate((t_l, t_lcl, t_u))
new_temp = _insert_lcl_level(pressure, temperature, p_lcl)
new_dewp = _insert_lcl_level(pressure, dewpoint, p_lcl)
return new_press, new_temp, new_dewp, prof_temp
def _parcel_profile_helper(pressure, temperature, dewpoint):
"""Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
"""
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpoint)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):
return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),
temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:])
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[dimensionless]')
def vapor_pressure(pressure, mixing_ratio):
r"""Calculate water vapor (partial) pressure.
Given total `pressure` and water vapor `mixing_ratio`, calculates the
partial pressure of water vapor.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
The ambient water vapor (partial) pressure in the same units as
`pressure`.
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.71:
.. math:: e = p \frac{r}{r + \epsilon}
See Also
--------
saturation_vapor_pressure, dewpoint
"""
return pressure * mixing_ratio / (mpconsts.epsilon + mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def saturation_vapor_pressure(temperature):
r"""Calculate the saturation water vapor (partial) pressure.
Parameters
----------
temperature : `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The saturation water vapor (partial) pressure
See Also
--------
vapor_pressure, dewpoint
Notes
-----
Instead of temperature, dewpoint may be used in order to calculate
the actual (ambient) water vapor (partial) pressure.
The formula used is that from [Bolton1980]_ for T in degrees Celsius:
.. math:: 6.112 e^\frac{17.67T}{T + 243.5}
"""
# Converted from original in terms of C to use kelvin. Using raw absolute values of C in
# a formula plays havoc with units support.
return sat_pressure_0c * np.exp(17.67 * (temperature - 273.15 * units.kelvin)
/ (temperature - 29.65 * units.kelvin))
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]')
def dewpoint_from_relative_humidity(temperature, relative_humidity):
r"""Calculate the ambient dewpoint given air temperature and relative humidity.
Parameters
----------
temperature : `pint.Quantity`
air temperature
relative_humidity : `pint.Quantity`
relative humidity expressed as a ratio in the range 0 < relative_humidity <= 1
Returns
-------
`pint.Quantity`
The dewpoint temperature
See Also
--------
dewpoint, saturation_vapor_pressure
"""
if np.any(relative_humidity > 1.2):
warnings.warn('Relative humidity >120%, ensure proper units.')
return dewpoint(relative_humidity * saturation_vapor_pressure(temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def dewpoint(vapor_pressure):
r"""Calculate the ambient dewpoint given the vapor pressure.
Parameters
----------
    vapor_pressure : `pint.Quantity`
Water vapor partial pressure
Returns
-------
`pint.Quantity`
dewpoint temperature
See Also
--------
dewpoint_from_relative_humidity, saturation_vapor_pressure, vapor_pressure
Notes
-----
This function inverts the [Bolton1980]_ formula for saturation vapor
    pressure to instead calculate the temperature. This yields the following
formula for dewpoint in degrees Celsius:
.. math:: T = \frac{243.5 log(e / 6.112)}{17.67 - log(e / 6.112)}
"""
val = np.log(vapor_pressure / sat_pressure_0c)
return 0. * units.degC + 243.5 * units.delta_degC * val / (17.67 - val)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]', '[dimensionless]')
def mixing_ratio(partial_press, total_press, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate the mixing ratio of a gas.
This calculates mixing ratio given its partial pressure and the total pressure of
the air. There are no required units for the input arrays, other than that
they have the same units.
Parameters
----------
partial_press : `pint.Quantity`
Partial pressure of the constituent gas
total_press : `pint.Quantity`
Total air pressure
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The (mass) mixing ratio, dimensionless (e.g. Kg/Kg or g/g)
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.73:
.. math:: r = \epsilon \frac{e}{p - e}
See Also
--------
saturation_mixing_ratio, vapor_pressure
"""
return (molecular_weight_ratio * partial_press
/ (total_press - partial_press)).to('dimensionless')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_mixing_ratio(total_press, temperature):
r"""Calculate the saturation mixing ratio of water vapor.
This calculation is given total pressure and the temperature. The implementation
uses the formula outlined in [Hobbs1977]_ pg.73.
Parameters
----------
total_press: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The saturation mixing ratio, dimensionless
"""
return mixing_ratio(saturation_vapor_pressure(temperature), total_press)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def equivalent_potential_temperature(pressure, temperature, dewpoint):
r"""Calculate equivalent potential temperature.
This calculation must be given an air parcel's pressure, temperature, and dewpoint.
The implementation uses the formula outlined in [Bolton1980]_:
First, the LCL temperature is calculated:
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
Which is then used to calculate the potential temperature at the LCL:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{L}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
dewpoint: `pint.Quantity`
Dewpoint of parcel
Returns
-------
`pint.Quantity`
The equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used, since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
"""
t = temperature.to('kelvin').magnitude
td = dewpoint.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(dewpoint).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, dewpoint).magnitude
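    # Evaluate the docstring's Bolton (1980) formulas: LCL temperature t_l, potential
    # temperature at the LCL th_l, and finally the equivalent potential temperature th_e.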
t_l = 56 + 1. / (1. / (td - 56) + np.log(t / td) / 800.)
th_l = t * (1000 / (p - e)) ** mpconsts.kappa * (t / t_l) ** (0.28 * r)
th_e = th_l * np.exp((3036. / t_l - 1.78) * r * (1 + 0.448 * r))
return th_e * units.kelvin
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_equivalent_potential_temperature(pressure, temperature):
r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because
.. math:: T_{L} = T_{K}
it follows that
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
The saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
"""
t = temperature.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(temperature).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, temperature).magnitude
th_l = t * (1000 / (p - e)) ** mpconsts.kappa
th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))
return th_es * units.kelvin
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual temperature.
This calculation must be given an air parcel's temperature and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.80.
Parameters
----------
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual temperature of the parcel
Notes
-----
.. math:: T_v = T \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
"""
return temperature * ((mixing_ratio + molecular_weight_ratio)
/ (molecular_weight_ratio * (1 + mixing_ratio)))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_potential_temperature(pressure, temperature, mixing_ratio,
molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual potential temperature.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Markowski2010]_ pg.13.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual potential temperature of the parcel
Notes
-----
.. math:: \Theta_v = \Theta \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
"""
pottemp = potential_temperature(pressure, temperature)
return virtual_temperature(pottemp, mixing_ratio, molecular_weight_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
"""
virttemp = virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def relative_humidity_wet_psychrometric(pressure, dry_bulb_temperature, wet_bulb_temperature,
**kwargs):
r"""Calculate the relative humidity with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
    wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
.. math:: relative_humidity = \frac{e}{e_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`e` is vapor pressure from the wet psychrometric calculation
* :math:`e_s` is the saturation vapor pressure
See Also
--------
psychrometric_vapor_pressure_wet, saturation_vapor_pressure
"""
return (psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature,
                                             wet_bulb_temperature, **kwargs)
/ saturation_vapor_pressure(dry_bulb_temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature, wet_bulb_temperature,
psychrometer_coefficient=6.21e-4 / units.kelvin):
r"""Calculate the vapor pressure with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
psychrometer_coefficient: `pint.Quantity`, optional
Psychrometer coefficient. Defaults to 6.21e-4 K^-1.
Returns
-------
`pint.Quantity`
Vapor pressure
Notes
-----
.. math:: e' = e'_w(T_w) - A p (T - T_w)
* :math:`e'` is vapor pressure
* :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature
:math:`T_w`
* :math:`p` is the pressure of the wet bulb
* :math:`T` is the temperature of the dry bulb
* :math:`T_w` is the temperature of the wet bulb
* :math:`A` is the psychrometer coefficient
Psychrometer coefficient depends on the specific instrument being used and the ventilation
of the instrument.
See Also
--------
saturation_vapor_pressure
"""
return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient
* pressure * (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))
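# --- Editor's illustrative sketch (not part of the original module) ---
# Vapor pressure from a hypothetical psychrometer reading (25 C dry bulb,
# 18 C wet bulb at 1000 hPa); assumes the module-level ``units`` registry.
def _example_psychrometric_vapor_pressure_wet():
    return psychrometric_vapor_pressure_wet(1000. * units.hPa, 25. * units.degC,
                                            18. * units.degC)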
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity):
r"""Calculate the mixing ratio from relative humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
relative_humidity: array_like
The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass
a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
Dimensionless mixing ratio
Notes
-----
Formula adapted from [Hobbs1977]_ pg. 74.
.. math:: w = (relative_humidity)(w_s)
* :math:`w` is mixing ratio
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`w_s` is the saturation mixing ratio
See Also
--------
relative_humidity_from_mixing_ratio, saturation_mixing_ratio
"""
return (relative_humidity
* saturation_mixing_ratio(pressure, temperature)).to('dimensionless')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_mixing_ratio(pressure, temperature, mixing_ratio):
r"""Calculate the relative humidity from mixing ratio, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`
Dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74.
.. math:: relative_humidity = \frac{w}{w_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`w` is mixing ratio
* :math:`w_s` is the saturation mixing ratio
See Also
--------
mixing_ratio_from_relative_humidity, saturation_mixing_ratio
"""
return mixing_ratio / saturation_mixing_ratio(pressure, temperature)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def mixing_ratio_from_specific_humidity(specific_humidity):
r"""Calculate the mixing ratio from specific humidity.
Parameters
----------
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Mixing ratio
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: w = \frac{q}{1-q}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, specific_humidity_from_mixing_ratio
"""
try:
specific_humidity = specific_humidity.to('dimensionless')
except AttributeError:
pass
return specific_humidity / (1 - specific_humidity)
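# --- Editor's illustrative sketch (not part of the original module) ---
# For small q the conversion w = q / (1 - q) barely changes the number:
# 10 g/kg of specific humidity maps to roughly 10.1 g/kg of mixing ratio.
def _example_mixing_ratio_from_specific_humidity():
    return mixing_ratio_from_specific_humidity(10 * units('g/kg'))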
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def specific_humidity_from_mixing_ratio(mixing_ratio):
r"""Calculate the specific humidity from the mixing ratio.
Parameters
----------
mixing_ratio: `pint.Quantity`
mixing ratio
Returns
-------
`pint.Quantity`
Specific humidity
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: q = \frac{w}{1+w}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, mixing_ratio_from_specific_humidity
"""
try:
mixing_ratio = mixing_ratio.to('dimensionless')
except AttributeError:
pass
return mixing_ratio / (1 + mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the relative humidity from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74. and [Salby1996]_ pg. 118.
.. math:: relative_humidity = \frac{q}{(1-q)w_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`q` is specific humidity
* :math:`w_s` is the saturation mixing ratio
See Also
--------
relative_humidity_from_mixing_ratio
"""
return (mixing_ratio_from_specific_humidity(specific_humidity)
/ saturation_mixing_ratio(pressure, temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom',
which_el='top'):
r"""Calculate CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and parcel path. CIN is integrated between the surface and
LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points
of the measured temperature profile and parcel profile are logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
dewpoint : `pint.Quantity`
The atmospheric dewpoint corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
which_lfc : str
Choose which LFC to integrate from. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'bottom'.
which_el : str
Choose which EL to integrate to. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'top'.
Returns
-------
`pint.Quantity`
Convective Available Potential Energy (CAPE).
`pint.Quantity`
Convective INhibition (CIN).
Notes
-----
Formula adopted from [Hobbs1977]_.
.. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
.. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
* :math:`CAPE` Convective available potential energy
* :math:`CIN` Convective inhibition
* :math:`LFC` Pressure of the level of free convection
* :math:`EL` Pressure of the equilibrium level
* :math:`SFC` Level of the surface or beginning of parcel path
* :math:`R_d` Gas constant
* :math:`g` Gravitational acceleration
* :math:`T_{parcel}` Parcel temperature
* :math:`T_{env}` Environment temperature
* :math:`p` Atmospheric pressure
See Also
--------
lfc, el
"""
pressure, temperature, dewpoint, parcel_profile = _remove_nans(pressure, temperature,
dewpoint, parcel_profile)
# Calculate LFC limit of integration
lfc_pressure, _ = lfc(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_lfc)
# If there is no LFC, no need to proceed.
if np.isnan(lfc_pressure):
return 0 * units('J/kg'), 0 * units('J/kg')
else:
lfc_pressure = lfc_pressure.magnitude
# Calculate the EL limit of integration
el_pressure, _ = el(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_el)
# No EL and we use the top reading of the sounding.
if np.isnan(el_pressure):
el_pressure = pressure[-1].magnitude
else:
el_pressure = el_pressure.magnitude
# Difference between the parcel path and measured temperature profiles
y = (parcel_profile - temperature).to(units.degK)
# Estimate zero crossings
x, y = _find_append_zero_crossings(np.copy(pressure), y)
# CAPE
# Only use data between the LFC and EL for calculation
p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cape = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# CIN
# Only use data between the surface and LFC for calculation
p_mask = _greater_or_close(x.m, lfc_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cin = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# Set CIN to 0 if it's returned as a positive value (#1190)
if cin > 0 * units('J/kg'):
cin = 0 * units('J/kg')
return cape, cin
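# --- Editor's illustrative sketch (not part of the original module) ---
# Typical call pattern for cape_cin: build the parcel profile first, then
# integrate. ``parcel_profile`` is assumed to be the helper referenced in the
# See Also section above; the sounding values below are made up.
def _example_cape_cin():
    p = [1000., 850., 700., 500., 300.] * units.hPa
    t = [25., 15., 5., -15., -40.] * units.degC
    td = [20., 12., 0., -25., -60.] * units.degC
    prof = parcel_profile(p, t[0], td[0]).to('degC')
    return cape_cin(p, t, td, prof)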
def _find_append_zero_crossings(x, y):
r"""
Find and interpolate zero crossings.
Estimate the zero crossings of an x,y series and add estimated crossings to series,
returning a sorted array with no duplicate values.
Parameters
----------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
Returns
-------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
"""
crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units, log_x=True)
x = concatenate((x, crossings[0]))
y = concatenate((y, crossings[1]))
# Resort so that data are in order
sort_idx = np.argsort(x)
x = x[sort_idx]
y = y[sort_idx]
# Remove duplicate data points if there are any
keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6
x = x[keep_idx]
y = y[keep_idx]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_parcel(pressure, temperature, dewpoint, height=None,
bottom=None, depth=300 * units.hPa):
"""
Determine the most unstable parcel in a layer.
Determines the most unstable parcel of air by calculating the equivalent
potential temperature and finding its maximum in the specified layer.
Parameters
----------
pressure: `pint.Quantity`
Atmospheric pressure profile
temperature: `pint.Quantity`
Atmospheric temperature profile
dewpoint: `pint.Quantity`
Atmospheric dewpoint profile
height: `pint.Quantity`, optional
Atmospheric height profile. Standard atmosphere assumed when None (the default).
bottom: `pint.Quantity`, optional
Bottom of the layer to consider for the calculation in pressure or height.
Defaults to using the bottom pressure or height.
depth: `pint.Quantity`, optional
Depth of the layer to consider for the calculation in pressure or height. Defaults
to 300 hPa.
Returns
-------
`pint.Quantity`
Pressure, temperature, and dewpoint of most unstable parcel in the profile.
integer
Index of the most unstable parcel in the given profile
See Also
--------
get_layer
"""
p_layer, t_layer, td_layer = get_layer(pressure, temperature, dewpoint, bottom=bottom,
depth=depth, height=height, interpolate=False)
theta_e = equivalent_potential_temperature(p_layer, t_layer, td_layer)
max_idx = np.argmax(theta_e)
return p_layer[max_idx], t_layer[max_idx], td_layer[max_idx], max_idx
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[pressure]', '[temperature]')
def isentropic_interpolation(levels, pressure, temperature, *args, axis=0,
temperature_out=False, max_iters=50, eps=1e-6,
bottom_up_search=True, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
levels : array
One-dimensional array of desired potential temperature surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
temperature_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to('hPa')
temperature = temperature.to('kelvin')
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
slices = tuple(slices)
pres = np.broadcast_to(pres[slices].magnitude, temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
levels = np.asarray(levels.m_as('kelvin')).reshape(-1)
isentlevels = levels[np.argsort(levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = mpconsts.kappa.m_as('dimensionless')
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(levels):
raise ValueError('Input theta level out of data bounds')
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = mpconsts.P0 ** ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(pres_theta.m, levels, axis,
from_below=bottom_up_search)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps, maxiter=max_iters)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if temperature_out = true, calculate temperature and output as last item in list
if temperature_out:
ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),
axis=axis, return_list_always=True)
ret.extend(others)
return ret
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def surface_based_cape_cin(pressure, temperature, dewpoint):
r"""Calculate surface-based CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile for a surface-based parcel. CIN is integrated
between the surface and LFC, CAPE is integrated between the LFC and EL (or top of
sounding). Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile. The first entry should be the starting
(surface) observation, with the array going from high to low pressure.
temperature : `pint.Quantity`
Temperature profile corresponding to the `pressure` profile.
dewpoint : `pint.Quantity`
Dewpoint profile corresponding to the `pressure` profile.
Returns
-------
`pint.Quantity`
Surface based Convective Available Potential Energy (CAPE).
`pint.Quantity`
Surface based Convective INhibition (CIN).
See Also
--------
cape_cin, parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
return cape_cin(p, t, td, profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate most unstable CAPE/CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and most unstable parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dew point profile
kwargs
Additional keyword arguments to pass to `most_unstable_parcel`
Returns
-------
`pint.Quantity`
Most unstable Convective Available Potential Energy (CAPE).
`pint.Quantity`
Most unstable Convective INhibition (CIN).
See Also
--------
cape_cin, most_unstable_parcel, parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
_, _, _, parcel_idx = most_unstable_parcel(pressure, temperature, dewpoint, **kwargs)
p, t, td, mu_profile = parcel_profile_with_lcl(pressure[parcel_idx:],
temperature[parcel_idx:],
dewpoint[parcel_idx:])
return cape_cin(p, t, td, mu_profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_layer_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate mixed-layer CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and mixed-layer parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated. Kwargs for `mixed_parcel` can be provided, such as `depth`.
Default mixed-layer depth is 100 hPa.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dewpoint profile
kwargs
Additional keyword arguments to pass to `mixed_parcel`
Returns
-------
`pint.Quantity`
Mixed-layer Convective Available Potential Energy (CAPE).
`pint.Quantity`
Mixed-layer Convective INhibition (CIN).
See Also
--------
cape_cin, mixed_parcel, parcel_profile
"""
depth = kwargs.get('depth', 100 * units.hPa)
parcel_pressure, parcel_temp, parcel_dewpoint = mixed_parcel(pressure, temperature,
dewpoint, **kwargs)
# Remove values below top of mixed layer and add in the mixed layer values
pressure_prof = pressure[pressure < (pressure[0] - depth)]
temp_prof = temperature[pressure < (pressure[0] - depth)]
dew_prof = dewpoint[pressure < (pressure[0] - depth)]
pressure_prof = concatenate([parcel_pressure, pressure_prof])
temp_prof = concatenate([parcel_temp, temp_prof])
dew_prof = concatenate([parcel_dewpoint, dew_prof])
p, t, td, ml_profile = parcel_profile_with_lcl(pressure_prof, temp_prof, dew_prof)
return cape_cin(p, t, td, ml_profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_parcel(pressure, temperature, dewpoint, parcel_start_pressure=None,
height=None, bottom=None, depth=100 * units.hPa, interpolate=True):
r"""Calculate the properties of a parcel mixed from a layer.
Determines the properties of an air parcel that is the result of complete mixing of a
given atmospheric layer.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
dewpoint : `pint.Quantity`
Atmospheric dewpoint profile
parcel_start_pressure : `pint.Quantity`, optional
Pressure at which the mixed parcel should begin (default None)
height: `pint.Quantity`, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data
Returns
-------
`pint.Quantity`
The pressure of the mixed parcel
`pint.Quantity`
The temperature of the mixed parcel
`pint.Quantity`
The dewpoint of the mixed parcel
"""
# If a parcel starting pressure is not provided, use the surface
if not parcel_start_pressure:
parcel_start_pressure = pressure[0]
# Calculate the potential temperature and mixing ratio over the layer
theta = potential_temperature(pressure, temperature)
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
# Mix the variables over the layer
mean_theta, mean_mixing_ratio = mixed_layer(pressure, theta, mixing_ratio, bottom=bottom,
height=height, depth=depth,
interpolate=interpolate)
# Convert back to temperature
mean_temperature = mean_theta * exner_function(parcel_start_pressure)
# Convert back to dewpoint
mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)
# Using globals() here allows us to keep the dewpoint parameter but still call the
# function of the same name.
mean_dewpoint = globals()['dewpoint'](mean_vapor_pressure)
return (parcel_start_pressure, mean_temperature.to(temperature.units),
mean_dewpoint.to(dewpoint.units))
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def mixed_layer(pressure, *args, height=None, bottom=None, depth=100 * units.hPa,
interpolate=True):
r"""Mix variable(s) over a layer, yielding a mass-weighted average.
This function will integrate a data variable with respect to pressure and determine the
average value using the mean value theorem.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
datavar : array-like
Atmospheric variable measured at the given pressures
height: array-like, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data (default True)
Returns
-------
`pint.Quantity`
The mixed value of the data variable.
"""
layer = get_layer(pressure, *args, height=height, bottom=bottom,
depth=depth, interpolate=interpolate)
p_layer = layer[0]
datavars_layer = layer[1:]
ret = []
for datavar_layer in datavars_layer:
actual_depth = abs(p_layer[0] - p_layer[-1])
ret.append((-1. / actual_depth.m) * np.trapz(datavar_layer.m, p_layer.m)
* datavar_layer.units)
return ret
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def dry_static_energy(height, temperature):
r"""Calculate the dry static energy of parcels.
This function will calculate the dry static energy following the first two terms of
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math::\text{dry static energy} = c_{pd} * T + gz
* :math:`T` is temperature
* :math:`z` is height
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
Returns
-------
`pint.Quantity`
The dry static energy
"""
return (mpconsts.g * height + mpconsts.Cp_d * temperature).to('kJ/kg')
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[dimensionless]')
def moist_static_energy(height, temperature, specific_humidity):
r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math::\text{moist static energy} = c_{pd} * T + gz + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
specific_humidity : `pint.Quantity`
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy
"""
return (dry_static_energy(height, temperature)
+ mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')
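# --- Editor's illustrative sketch (not part of the original module) ---
# Dry vs. moist static energy for the same parcel; the difference is simply
# Lv * q. Assumes the module-level ``units`` registry.
def _example_static_energy():
    dse = dry_static_energy(1500. * units.m, 280. * units.kelvin)
    mse = moist_static_energy(1500. * units.m, 280. * units.kelvin, 6 * units('g/kg'))
    return dse, mse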
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic(pressure, temperature, mixing_ratio=None,
molecular_weight_ratio=mpconsts.epsilon, bottom=None, depth=None):
r"""Calculate the thickness of a layer via the hypsometric equation.
This thickness calculation uses the pressure and temperature profiles (and optionally
mixing ratio) via the hypsometric equation with virtual temperature adjustment
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
mixing_ratio : `pint.Quantity`, optional
Profile of dimensionless mass mixing ratio. If none is given, virtual temperature
is simply set to be the given temperature.
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature
"""
# Get the data for the layer, conditional upon bottom/depth being specified and mixing
# ratio being given
if bottom is None and depth is None:
if mixing_ratio is None:
layer_p, layer_virttemp = pressure, temperature
else:
layer_p = pressure
layer_virttemp = virtual_temperature(temperature, mixing_ratio,
molecular_weight_ratio)
else:
if mixing_ratio is None:
layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,
depth=depth)
else:
layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing_ratio,
bottom=bottom, depth=depth)
layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)
# Take the integral (with unit handling) and return the result in meters
return (- mpconsts.Rd / mpconsts.g * np.trapz(
layer_virttemp.m_as('K'), x=np.log(layer_p.m_as('hPa'))) * units.K).to('m')
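# --- Editor's illustrative sketch (not part of the original module) ---
# Hypsometric thickness of a made-up 1000-500 hPa column without a moisture
# correction (mixing_ratio left as None).
def _example_thickness_hydrostatic():
    p = [1000., 850., 700., 500.] * units.hPa
    t = [15., 8., 0., -18.] * units.degC
    return thickness_hydrostatic(p, t)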
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,
bottom=None, depth=None):
r"""Calculate the thickness of a layer given pressure, temperature and relative humidity.
Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,
temperature, and relative humidity profiles via the hypsometric equation with virtual
temperature adjustment.
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated
from the profiles of temperature and relative humidity.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
relative_humidity : `pint.Quantity`
Atmospheric relative humidity profile. The relative humidity is expressed as a
unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are
attached.
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic, pressure_to_height_std, virtual_temperature,
mixing_ratio_from_relative_humidity
"""
mixing = mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity)
return thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing, bottom=bottom,
depth=depth)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency_squared(height, potential_temperature, axis=0):
r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
The square of the Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature
"""
# Ensure validity of temperature units
potential_temperature = potential_temperature.to('K')
# Calculate and return the square of Brunt-Vaisala frequency
return mpconsts.g / potential_temperature * first_derivative(potential_temperature,
x=height, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency(height, potential_temperature, axis=0):
r"""Calculate the Brunt-Vaisala frequency.
This function will calculate the Brunt-Vaisala frequency as follows:
.. math:: N = \left( \frac{g}{\theta} \frac{d\theta}{dz} \right)^\frac{1}{2}
This formula based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
This function is a wrapper for `brunt_vaisala_frequency_squared` that filters out negative
(unstable) quantities and takes the square root.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency_squared, brunt_vaisala_period, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude < 0] = np.nan
return np.sqrt(bv_freq_squared)
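# --- Editor's illustrative sketch (not part of the original module) ---
# N for a stably stratified layer in which theta increases with height;
# a 3 K/km theta gradient gives N on the order of 0.01 s**-1.
def _example_brunt_vaisala_frequency():
    heights = [0., 1000., 2000., 3000.] * units.m
    theta = [290., 293., 296., 299.] * units.kelvin
    return brunt_vaisala_frequency(heights, theta)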
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_period(height, potential_temperature, axis=0):
r"""Calculate the Brunt-Vaisala period.
This function is a helper function for `brunt_vaisala_frequency` that calculates the
period of oscillation as in Exercise 3.13 of [Hobbs2006]_:
.. math:: \tau = \frac{2\pi}{N}
Returns `NaN` when :math:`N^2 \le 0`.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
Brunt-Vaisala period.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_frequency_squared, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan
return 2 * np.pi / np.sqrt(bv_freq_squared)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def wet_bulb_temperature(pressure, temperature, dewpoint):
"""Calculate the wet-bulb temperature using Normand's rule.
This function calculates the wet-bulb temperature using the Normand method. The LCL is
computed, and that parcel brought down to the starting pressure along a moist adiabat.
The Normand method (and others) are described and compared by [Knox2017]_.
Parameters
----------
pressure : `pint.Quantity`
Initial atmospheric pressure
temperature : `pint.Quantity`
Initial atmospheric temperature
dewpoint : `pint.Quantity`
Initial atmospheric dewpoint
Returns
-------
`pint.Quantity`
Wet-bulb temperature
See Also
--------
lcl, moist_lapse
"""
if not hasattr(pressure, 'shape'):
pressure = np.atleast_1d(pressure)
temperature = np.atleast_1d(temperature)
dewpoint = np.atleast_1d(dewpoint)
it = np.nditer([pressure, temperature, dewpoint, None],
op_dtypes=['float', 'float', 'float', 'float'],
flags=['buffered'])
for press, temp, dewp, ret in it:
press = press * pressure.units
temp = temp * temperature.units
dewp = dewp * dewpoint.units
lcl_pressure, lcl_temperature = lcl(press, temp, dewp)
moist_adiabat_temperatures = moist_lapse(concatenate([lcl_pressure, press]),
lcl_temperature)
ret[...] = moist_adiabat_temperatures[-1].magnitude
# If we started with a scalar, return a scalar
if it.operands[3].size == 1:
return it.operands[3][0] * moist_adiabat_temperatures.units
return it.operands[3] * moist_adiabat_temperatures.units
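# --- Editor's illustrative sketch (not part of the original module) ---
# Normand's rule for a single surface observation: the wet bulb lands between
# the dewpoint and the dry-bulb temperature.
def _example_wet_bulb_temperature():
    return wet_bulb_temperature(1000. * units.hPa, 25. * units.degC, 15. * units.degC)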
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def static_stability(pressure, temperature, axis=0):
r"""Calculate the static stability within a vertical profile.
.. math:: \sigma = -\frac{RT}{p} \frac{\partial \ln \theta}{\partial p}
This formula is based on equation 4.3.6 in [Bluestein1992]_.
Parameters
----------
pressure : `pint.Quantity`
Profile of atmospheric pressure
temperature : `pint.Quantity`
Profile of temperature
axis : int, optional
The axis corresponding to vertical in the pressure and temperature arrays, defaults
to 0.
Returns
-------
`pint.Quantity`
The profile of static stability.
"""
theta = potential_temperature(pressure, temperature)
return - mpconsts.Rd * temperature / pressure * first_derivative(np.log(theta.m_as('K')),
x=pressure, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def dewpoint_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the dewpoint from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Dew point temperature
See Also
--------
relative_humidity_from_mixing_ratio, dewpoint_from_relative_humidity
"""
return dewpoint_from_relative_humidity(temperature,
relative_humidity_from_specific_humidity(
pressure, temperature, specific_humidity))
@exporter.export
@preprocess_xarray
@check_units('[length]/[time]', '[pressure]', '[temperature]')
def vertical_velocity_pressure(w, pressure, temperature, mixing_ratio=0):
r"""Calculate omega from w assuming hydrostatic conditions.
This function converts vertical velocity with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` to that
with respect to pressure :math:`\left(\omega = \frac{Dp}{Dt}\right)`
assuming hydrostatic conditions on the synoptic scale.
By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
w: `pint.Quantity`
Vertical velocity in terms of height
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of pressure (in Pascals / second)
See Also
--------
density, vertical_velocity
"""
rho = density(pressure, temperature, mixing_ratio)
return (-mpconsts.g * rho * w).to('Pa/s')
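# --- Editor's illustrative sketch (not part of the original module) ---
# omega ~ -rho * g * w: a 1 m/s updraft in near-surface air is roughly -12 Pa/s.
def _example_vertical_velocity_pressure():
    return vertical_velocity_pressure(1. * units('m/s'), 1000. * units.hPa,
                                      290. * units.kelvin)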
@exporter.export
@preprocess_xarray
@check_units('[pressure]/[time]', '[pressure]', '[temperature]')
def vertical_velocity(omega, pressure, temperature, mixing_ratio=0):
r"""Calculate w from omega assuming hydrostatic conditions.
This function converts vertical velocity with respect to pressure
:math:`\left(\omega = \frac{Dp}{Dt}\right)` to that with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` assuming hydrostatic conditions on
the synoptic scale. By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
so that
.. math:: w \simeq \frac{- \omega}{\rho g}
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
omega: `pint.Quantity`
Vertical velocity in terms of pressure
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of height (in meters / second)
See Also
--------
density, vertical_velocity_pressure
"""
rho = density(pressure, temperature, mixing_ratio)
return (omega / (- mpconsts.g * rho)).to('m/s')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def specific_humidity_from_dewpoint(pressure, dewpoint):
r"""Calculate the specific humidity from the dewpoint temperature and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dewpoint: `pint.Quantity`
Dewpoint temperature
Returns
-------
`pint.Quantity`
Specific humidity
See Also
--------
mixing_ratio, saturation_mixing_ratio
"""
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
return specific_humidity_from_mixing_ratio(mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lifted_index(pressure, temperature, parcel_profile):
"""Calculate Lifted Index from the pressure temperature and parcel profile.
Lifted index formula derived from [Galway1956]_ and referenced by [Doswell-Schultz2006]_:
LI = T500 - Tp500
where:
T500 is the measured temperature at 500 hPa.
Tp500 is the temperature of the lifted parcel at 500 hPa.
Calculation of the lifted index is defined as the temperature difference between the
observed 500 hPa temperature and the temperature of a parcel lifted from the
surface to 500 hPa.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
Returns
-------
`pint.Quantity`
Lifted Index.
"""
# find the index for the 500 hPa pressure level.
idx = np.where(pressure == 500 * units.hPa)
# find the measured temperature at 500 hPa.
T500 = temperature[idx]
# find the parcel profile temperature at 500 hPa.
Tp500 = parcel_profile[idx]
# calculate the lifted index.
lifted_index = T500 - Tp500.to(units.degC)
return lifted_index
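# --- Editor's note (not part of the original module) ---
# lifted_index locates the 500 hPa level with an exact equality test, so the
# pressure array passed in must contain 500 hPa explicitly; the parcel profile
# would typically come from parcel_profile() evaluated on those same levels.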
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[speed]', '[speed]')
def gradient_richardson_number(height, potential_temperature, u, v, axis=0):
r"""Calculate the gradient (or flux) Richardson number.
.. math:: Ri = \frac{g}{\theta} \frac{\partial \theta/\partial z}
          {\left(\partial u / \partial z\right)^2 + \left(\partial v / \partial z\right)^2}
See [Holton2004]_ pg. 121-122. As noted by [Holton2004]_, flux Richardson
number values below 0.25 indicate turbulence.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
u : `pint.Quantity`
x component of the wind
v : `pint.Quantity`
y component of the wind
axis : int, optional
The axis corresponding to vertical, defaults to 0.
Returns
-------
`pint.Quantity`
Gradient Richardson number
"""
dthetadz = first_derivative(potential_temperature, x=height, axis=axis)
dudz = first_derivative(u, x=height, axis=axis)
dvdz = first_derivative(v, x=height, axis=axis)
return (mpconsts.g / potential_temperature) * (dthetadz / (dudz ** 2 + dvdz ** 2))
| [
[
[
229,
237
],
[
29809,
29817
]
],
[
[
246,
257
],
[
7814,
7816
],
[
8081,
8083
],
[
8166,
8168
],
[
8308,
8310
],
[
8484,
8486
],
[
8640,
8642
],
[
8803,
8805
],
[
11101,
11103
],
[
11110,
11112
],
[
13890,
13892
],
[
14717,
14719
],
[
14845,
14847
],
[
14870,
14872
],
[
15392,
15394
],
[
15446,
15448
],
[
15471,
15473
],
[
18079,
18081
],
[
18096,
18098
],
[
18134,
18136
],
[
18151,
18153
],
[
18886,
18888
],
[
18908,
18910
],
[
21415,
21417
],
[
21440,
21442
],
[
22143,
22145
],
[
22168,
22170
],
[
26178,
26180
],
[
26275,
26277
],
[
26367,
26369
],
[
27150,
27152
],
[
29022,
29024
],
[
29768,
29770
],
[
30689,
30691
],
[
34556,
34558
],
[
34673,
34675
],
[
36803,
36805
],
[
51099,
51101
],
[
51480,
51482
],
[
51802,
51804
],
[
52091,
52093
],
[
52111,
52113
],
[
52397,
52399
],
[
52417,
52419
],
[
53164,
53166
],
[
53341,
53343
],
[
53463,
53465
],
[
55220,
55222
],
[
58053,
58055
],
[
58141,
58143
],
[
58252,
58254
],
[
58298,
58300
],
[
58310,
58312
],
[
58487,
58489
],
[
58558,
58560
],
[
58740,
58742
],
[
59059,
59061
],
[
59082,
59084
],
[
59275,
59277
],
[
60003,
60005
],
[
60380,
60382
],
[
60558,
60560
],
[
60538,
60540
],
[
70479,
70481
],
[
75120,
75122
],
[
75166,
75168
],
[
79854,
79856
],
[
79873,
79875
],
[
81035,
81037
],
[
81058,
81060
],
[
81066,
81068
],
[
82005,
82007
],
[
82051,
82053
],
[
82097,
82099
],
[
82131,
82133
],
[
83795,
83797
],
[
89429,
89431
],
[
57680,
57682
]
],
[
[
265,
286
],
[
8397,
8399
],
[
8718,
8720
]
],
[
[
294,
314
],
[
10796,
10798
],
[
60127,
60129
]
],
[
[
336,
353
],
[
26160,
26177
],
[
51944,
51961
],
[
52251,
52268
]
],
[
[
355,
369
],
[
14724,
14738
],
[
51908,
51922
],
[
60513,
60527
]
],
[
[
371,
383
],
[
13187,
13199
],
[
20841,
20853
],
[
50730,
50742
],
[
62411,
62423
],
[
63778,
63790
]
],
[
[
385,
406
],
[
59482,
59503
]
],
[
[
428,
446
],
[
13990,
14008
],
[
14167,
14185
],
[
15171,
15189
],
[
17455,
17473
],
[
17792,
17810
],
[
21632,
21650
],
[
53131,
53149
]
],
[
[
448,
464
],
[
78439,
78455
],
[
83778,
83794
],
[
90747,
90763
],
[
90819,
90835
],
[
90871,
90887
]
],
[
[
466,
475
],
[
54983,
54992
],
[
70156,
70165
],
[
74613,
74622
],
[
74779,
74788
]
],
[
[
492,
513
],
[
1668,
1676
],
[
30972,
30980
],
[
37065,
37073
],
[
38291,
38299
],
[
39567,
39575
],
[
72453,
72461
],
[
2458,
2466
],
[
5815,
5823
],
[
28117,
28125
],
[
34614,
34622
],
[
36769,
36777
],
[
40529,
40537
],
[
52064,
52072
],
[
52371,
52379
],
[
58856,
58864
],
[
59346,
59354
],
[
60790,
60798
],
[
71249,
71257
],
[
71271,
71279
],
[
72189,
72197
],
[
75093,
75101
],
[
75107,
75115
],
[
78402,
78410
],
[
83739,
83747
],
[
86160,
86168
],
[
87609,
87617
],
[
90925,
90933
],
[
7394,
7402
],
[
7412,
7420
],
[
7449,
7457
],
[
7466,
7474
],
[
7480,
7488
],
[
7499,
7507
],
[
7555,
7563
],
[
10696,
10704
]
],
[
[
534,
551
],
[
58371,
58388
]
],
[
[
592,
606
],
[
26891,
26905
],
[
60926,
60940
]
],
[
[
635,
643
],
[
747,
755
]
],
[
[
664,
675
],
[
848,
859
],
[
1580,
1591
],
[
2512,
2523
],
[
3457,
3468
],
[
4668,
4679
],
[
5869,
5880
],
[
9042,
9053
],
[
11305,
11316
],
[
19328,
19339
],
[
22234,
22245
],
[
23255,
23266
],
[
27254,
27265
],
[
28189,
28200
],
[
29190,
29201
],
[
29991,
30002
],
[
30845,
30856
],
[
32195,
32206
],
[
32871,
32882
],
[
34795,
34806
],
[
36924,
36935
],
[
38082,
38093
],
[
39414,
39425
],
[
40631,
40642
],
[
41961,
41972
],
[
43621,
43632
],
[
44783,
44794
],
[
45797,
45808
],
[
46582,
46593
],
[
47334,
47345
],
[
48428,
48439
],
[
53601,
53612
],
[
55352,
55363
],
[
61150,
61161
],
[
62616,
62627
],
[
64220,
64231
],
[
66252,
66263
],
[
68969,
68980
],
[
70612,
70623
],
[
71351,
71362
],
[
72293,
72304
],
[
75253,
75264
],
[
77287,
77298
],
[
78604,
78615
],
[
79937,
79948
],
[
81130,
81141
],
[
82970,
82981
],
[
83951,
83962
],
[
84853,
84864
],
[
86232,
86243
],
[
87677,
87688
],
[
88311,
88322
],
[
89773,
89784
]
],
[
[
677,
688
],
[
23192,
23203
],
[
25083,
25094
],
[
25130,
25141
],
[
25966,
25977
],
[
26481,
26492
],
[
53216,
53227
],
[
53255,
53266
],
[
65927,
65938
],
[
65989,
66000
],
[
66042,
66053
],
[
82555,
82566
]
],
[
[
690,
695
],
[
794,
799
],
[
42184,
42189
],
[
53782,
53787
],
[
66445,
66450
],
[
69066,
69071
],
[
8939,
8944
],
[
26260,
26265
],
[
26352,
26357
],
[
29061,
29066
],
[
29136,
29141
],
[
30746,
30751
],
[
30767,
30772
],
[
34743,
34748
],
[
36872,
36877
],
[
40557,
40562
],
[
40574,
40579
],
[
51142,
51147
],
[
51161,
51166
],
[
51720,
51725
],
[
52132,
52137
],
[
52148,
52153
],
[
52438,
52443
],
[
52454,
52459
],
[
52551,
52556
],
[
52584,
52589
],
[
60630,
60635
],
[
60826,
60831
],
[
65460,
65465
],
[
75197,
75202
],
[
89456,
89461
],
[
89698,
89703
],
[
7251,
7256
],
[
7300,
7305
],
[
10618,
10623
]
],
[
[
717,
734
],
[
829,
846
],
[
1561,
1578
],
[
2493,
2510
],
[
3438,
3455
],
[
4649,
4666
],
[
5850,
5867
],
[
9023,
9040
],
[
11286,
11303
],
[
19309,
19326
],
[
22215,
22232
],
[
23236,
23253
],
[
27235,
27252
],
[
28170,
28187
],
[
29171,
29188
],
[
29972,
29989
],
[
30826,
30843
],
[
32176,
32193
],
[
32852,
32869
],
[
34776,
34793
],
[
36905,
36922
],
[
38063,
38080
],
[
39395,
39412
],
[
40612,
40629
],
[
41942,
41959
],
[
43602,
43619
],
[
44764,
44781
],
[
45778,
45795
],
[
46563,
46580
],
[
47315,
47332
],
[
48409,
48426
],
[
53582,
53599
],
[
55333,
55350
],
[
61131,
61148
],
[
62597,
62614
],
[
64201,
64218
],
[
66233,
66250
],
[
68950,
68967
],
[
70593,
70610
],
[
71332,
71349
],
[
72274,
72291
],
[
75234,
75251
],
[
77268,
77285
],
[
78585,
78602
],
[
79918,
79935
],
[
81111,
81128
],
[
82951,
82968
],
[
83932,
83949
],
[
84834,
84851
],
[
86213,
86230
],
[
87658,
87675
],
[
88292,
88309
],
[
89754,
89771
]
],
[
[
736,
744
],
[
812,
820
],
[
1544,
1552
],
[
2476,
2484
],
[
3421,
3429
],
[
4632,
4640
],
[
5833,
5841
],
[
9006,
9014
],
[
11269,
11277
],
[
19292,
19300
],
[
22198,
22206
],
[
23219,
23227
],
[
27218,
27226
],
[
28153,
28161
],
[
29154,
29162
],
[
29955,
29963
],
[
30809,
30817
],
[
32159,
32167
],
[
32835,
32843
],
[
34759,
34767
],
[
36888,
36896
],
[
38046,
38054
],
[
39378,
39386
],
[
40595,
40603
],
[
41925,
41933
],
[
43585,
43593
],
[
44747,
44755
],
[
45761,
45769
],
[
46546,
46554
],
[
47298,
47306
],
[
48392,
48400
],
[
53565,
53573
],
[
55316,
55324
],
[
61114,
61122
],
[
62580,
62588
],
[
64184,
64192
],
[
66216,
66224
],
[
68933,
68941
],
[
70576,
70584
],
[
71315,
71323
],
[
72257,
72265
],
[
75217,
75225
],
[
77251,
77259
],
[
78568,
78576
],
[
79901,
79909
],
[
81094,
81102
],
[
82934,
82942
],
[
83915,
83923
],
[
84817,
84825
],
[
86196,
86204
],
[
87641,
87649
],
[
88275,
88283
],
[
89737,
89745
]
],
[
[
768,
783
],
[
29004,
29019
],
[
30713,
30728
]
],
[
[
898,
929
]
],
[
[
1624,
1638
],
[
3393,
3407
],
[
4604,
4618
],
[
68471,
68485
]
],
[
[
2559,
2580
],
[
39252,
39273
],
[
58948,
58969
],
[
68009,
68030
],
[
83680,
83701
]
],
[
[
3504,
3542
]
],
[
[
4729,
4738
],
[
26041,
26050
]
],
[
[
5930,
5941
],
[
26555,
26566
],
[
82543,
82554
]
],
[
[
9106,
9109
],
[
14380,
14383
],
[
21795,
21798
],
[
25643,
25646
],
[
82483,
82486
]
],
[
[
11386,
11389
],
[
50922,
50925
]
],
[
[
15964,
15988
],
[
15744,
15768
],
[
21913,
21937
]
],
[
[
17050,
17062
],
[
16553,
16565
]
],
[
[
18176,
18193
],
[
16723,
16740
]
],
[
[
19409,
19411
],
[
51297,
51299
]
],
[
[
22298,
22312
]
],
[
[
23319,
23342
],
[
13378,
13401
],
[
21032,
21055
],
[
62481,
62504
],
[
63941,
63964
],
[
66111,
66134
]
],
[
[
25342,
25364
],
[
23125,
23147
],
[
24943,
24965
]
],
[
[
26765,
26782
],
[
25176,
25193
],
[
25239,
25256
]
],
[
[
27303,
27317
],
[
11218,
11232
],
[
68567,
68581
],
[
10603,
10617
]
],
[
[
28222,
28247
],
[
1435,
1460
],
[
1481,
1506
],
[
10737,
10762
],
[
29912,
29937
],
[
32779,
32804
],
[
34399,
34424
],
[
36609,
36634
],
[
41873,
41898
],
[
43422,
43447
]
],
[
[
29242,
29273
],
[
84594,
84625
]
],
[
[
30021,
30029
],
[
29883,
29891
]
],
[
[
30908,
30920
],
[
10724,
10736
],
[
32766,
32778
]
],
[
[
32242,
32265
],
[
34463,
34486
],
[
36676,
36699
],
[
44676,
44699
],
[
45711,
45734
],
[
48341,
48364
],
[
68073,
68096
],
[
88167,
88190
],
[
7347,
7370
]
],
[
[
32935,
32967
],
[
55145,
55177
]
],
[
[
34842,
34885
]
],
[
[
36995,
37014
],
[
39308,
39327
],
[
40434,
40453
],
[
74412,
74431
],
[
74936,
74955
]
],
[
[
38167,
38196
]
],
[
[
39499,
39506
],
[
86102,
86109
],
[
87541,
87548
]
],
[
[
40695,
40730
]
],
[
[
42025,
42057
],
[
41717,
41749
]
],
[
[
43687,
43722
],
[
77031,
77066
]
],
[
[
44849,
44884
]
],
[
[
45832,
45867
],
[
48272,
48307
]
],
[
[
46617,
46652
],
[
88222,
88257
]
],
[
[
47400,
47440
],
[
84682,
84722
]
],
[
[
48509,
48517
],
[
18616,
18624
],
[
62549,
62557
],
[
64150,
64158
],
[
66182,
66190
]
],
[
[
52625,
52652
],
[
51774,
51801
]
],
[
[
53665,
53685
],
[
63850,
63870
]
],
[
[
55416,
55440
]
],
[
[
61214,
61236
]
],
[
[
62680,
62702
]
],
[
[
64284,
64304
]
],
[
[
66316,
66328
],
[
65523,
65535
]
],
[
[
68999,
69010
],
[
68193,
68204
]
],
[
[
70657,
70674
],
[
72136,
72153
]
],
[
[
71415,
71434
]
],
[
[
72340,
72361
],
[
77121,
77142
]
],
[
[
75300,
75344
]
],
[
[
77332,
77363
],
[
79673,
79704
],
[
80853,
80884
]
],
[
[
78649,
78672
]
],
[
[
79982,
80002
]
],
[
[
81194,
81214
]
],
[
[
83017,
83033
]
],
[
[
84017,
84048
]
],
[
[
84919,
84945
]
],
[
[
86300,
86317
]
],
[
[
87724,
87755
]
],
[
[
88375,
88387
]
],
[
[
89840,
89866
]
]
] |
# TODO: maybe make this flexible
| [] |
#!/usr/bin/env python
"""
A very simple progress bar which keep track of the progress as we consume an
iterator.
"""
import os
import signal
import time
from prompt_toolkit import HTML
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import ProgressBar
def main():
bottom_toolbar = HTML(
' <b>[f]</b> Print "f" <b>[q]</b> Abort <b>[x]</b> Send Control-C.'
)
# Create custom key bindings first.
kb = KeyBindings()
cancel = [False]
@kb.add("f")
def _(event):
print("You pressed `f`.")
@kb.add("q")
def _(event):
"Quit by setting cancel flag."
cancel[0] = True
@kb.add("x")
def _(event):
"Quit by sending SIGINT to the main thread."
os.kill(os.getpid(), signal.SIGINT)
# Use `patch_stdout`, to make sure that prints go above the
# application.
with patch_stdout():
with ProgressBar(key_bindings=kb, bottom_toolbar=bottom_toolbar) as pb:
for i in pb(range(800)):
time.sleep(0.01)
if cancel[0]:
break
if __name__ == "__main__":
main()
| [
[
[
124,
126
],
[
815,
817
],
[
823,
825
]
],
[
[
134,
140
],
[
836,
842
]
],
[
[
148,
152
],
[
1093,
1097
]
],
[
[
181,
185
],
[
374,
378
]
],
[
[
225,
236
],
[
513,
524
]
],
[
[
277,
289
],
[
944,
956
]
],
[
[
327,
338
],
[
973,
984
]
],
[
[
345,
349
],
[
1200,
1204
]
]
] |
#!/usr/bin/env python
# encoding: utf-8
import os
import sqlite3 as lite
import sys
import json
import time
import urllib.request
import tweepy
from TwitterMiner_Keys import *
from tweepy import OAuthHandler
from TwitterMiner_settings import *
import hashlib
#from Twitter_validate import validate_image
def dump_hash(twitter_dump):
data_hash = None # Ensure the value starts with nothing
dump = hashlib.sha1()
dump.update(twitter_dump)
data_hash = dump.hexdigest()
return data_hash
def file_hash(point_to_file):
hash_sha1 = hashlib.sha1()
with open(point_to_file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha1.update(chunk)
print(hash_sha1.hexdigest())
return hash_sha1.hexdigest()
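# Note (editor's addition): extract_image_blob below relies on a module-level sqlite
# cursor `c` and a `tweet_id` defined elsewhere in this script; it writes the stored
# blob back out to test.jpg.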
def extract_image_blob(posted_image_dest):
with open("test.jpg", "wb") as image_file:
c.execute("SELECT tweeted_image FROM T_Tweets WHERE Tweet_id = " + str(tweet_id))
ablob = c.fetchone()
image_file.write(ablob[0])
def create_db(table_name):
c.execute("PRAGMA journal_mode = WAL")
c.execute("CREATE TABLE IF NOT EXISTS " + table_name + "(tweet_id INTEGER NOT NULL PRIMARY KEY, date_mined TEXT, screen_name TEXT, \
user_id INTEGER, users_name TEXT, created_at_UTC TEXT, is_retweet TEXT, \
retweeted_times TEXT, text TEXT, place_name TEXT, country_code TEXT, country TEXT, \
bounding_box TEXT, source_tweeted TEXT, geo TEXT, in_reply_to_user TEXT, \
inreply_statusid TEXT, posted_image_dest TEXT, tweeted_image BLOB, image_hash TEXT, \
media_type TEXT, media_url TEXT, media_id TEXT, posted_video_dest TEXT, \
tweeted_video BLOB, video_hash TEXT, video_type TEXT, video_url TEXT, \
url_in_tweet TEXT, status BLOB, status_hash TEXT, bookmark TEXT)")
conn.commit()
def get_all_tweets(screen_name):
#Twitter only allows access to a users most recent 3240 tweets with this method
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
try:
new_tweets = api.user_timeline(screen_name = screen_name, count=200)
except tweepy.TweepError:
print("Failed to pull tweets from %s" % screen_name)
print("User may be protected/private.")
print("Exiting...")
sys.exit()
except tweepy.RateLimitError: # I want to add code here to switch creds if a Rate limit occurs
print("Failed to pull the tweets due to a Twitter Rate Limit error.")
print("Please wait 15 min and try again...")
sys.exit()
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print("getting tweets before %s" % (oldest))
#all subsiquent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print("...%s tweets downloaded so far" % (len(alltweets)))
#transform the tweepy tweets into a 2D array that will populate the csv
for status in alltweets:
# Pull the pieces of the tweet and put them in a variable
Tweetid = status.id
screenname = status.user.screen_name
userid = status.user.id
usersname = status.user.name
tweettime = status.created_at
# Checks to see if status has the attribute of status.retweeted_status, then assigns is_retweet a value
if hasattr(status, 'retweeted_status'):
is_retweet = True
#Added this section on 6-19-19 due to truncated ReTweets
#This checks for populated data in the extended_tweet
#If data is populated, it pulls the entire full_text
#Thanks to Fraser Phillips for finding this issue
if hasattr(status.retweeted_status, 'extended_tweet'):
Amp_text = str(status.retweeted_status.extended_tweet['full_text'])
tweet = "RT: " + Amp_text.replace('&','&')
else:
Amp_text = status.retweeted_status.text
tweet = "RT: " + Amp_text.replace('&','&')
else:
is_retweet = False
Amp_text = status.text
tweet = Amp_text.replace('&', '&')
retweeted_times = status.retweet_count
if status.place is not None:
placename = status.place.full_name
countrycode = status.place.country_code
country = status.place.country
boundingbox = str(status.place.bounding_box.coordinates)
else:
placename = None
countrycode = None
country = None
boundingbox = None
Tweet_source = status.source
geo = status.geo
if geo is not None:
geo = json.dumps(geo)
inreplytouser = status.in_reply_to_screen_name
inreply_tostatus = status.in_reply_to_status_id_str
#Checks for Media in the Tweet and downloads it
if 'media' in status.entities:
image_posted = status.entities['media'][0]['media_url']
remove_tweet_url = image_posted.split('/')[-1]
posted_image_dest = os.path.join("Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_image/" + remove_tweet_url)
image_path = "Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_image/"
if not os.path.exists(image_path):
os.makedirs(image_path)
try:
print("Downloading... %s" % posted_image_dest)
urllib.request.urlretrieve(image_posted, filename = posted_image_dest)
tweeted_image = open(posted_image_dest, "rb").read()
image_hash = dump_hash(tweeted_image)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_image = None
image_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE - Unknown Error"
tweeted_image = None
image_hash = None
pass
mediatype = status.entities['media'][0]['type']
mediaurl = status.entities['media'][0]['media_url']
mediaid = status.entities['media'][0]['id']
else:
posted_image_dest = None
mediatype = None
mediaurl = None
mediaid = None
tweeted_image = None
image_hash = None
# New video Code
#Checks for Video in the tweets and downloads it
if hasattr(status, 'extended_entities'):
if 'video_info' in status.extended_entities['media'][0]:
# This section checks how many variant dictionaries the tweet contains,
# compares the bitrate of each variant to find the highest value,
# and then downloads the video for that highest-bitrate variant.
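# A typical variant dictionary looks like {'bitrate': 832000, 'content_type': 'video/mp4', 'url': 'https://...'} (illustrative values).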
variant_times = len(status.extended_entities['media'][0]['video_info']['variants']) # Gets the number of variants
bit_rate = -1
for variant_count in range(0, variant_times): #iterates through all the variants in that tweet
if 'bitrate' in status.extended_entities['media'][0]['video_info']['variants'][variant_count] and \
bit_rate < status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']:
bit_rate = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']
videourl = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['url']
videotype = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['content_type']
remove_video_url = videourl.split('/')[-1]
posted_video_dest = os.path.join("Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_video/" + remove_video_url)
video_path = "Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_video/"
if not os.path.exists(video_path):
os.makedirs(video_path)
try:
print("Downloading... %s" % posted_video_dest)
urllib.request.urlretrieve(videourl, filename = posted_video_dest)
tweeted_video = open(posted_video_dest, "rb").read()
video_hash = dump_hash(tweeted_video)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
else:
posted_video_dest = None
videotype = None
videourl = None
tweeted_video = None
video_hash = None
else:
posted_video_dest = None
videotype = None
videourl = None
tweeted_video = None
video_hash = None
# End Video Check
# End new video Code
if not status.entities['urls']:
url_in_tweet = None
else:
url_in_tweet = str(status.entities['urls'][0]['url'])
#Grab the current date and time
now = time.strftime("%c")
# Starts the raw hash process
status_dump = str(status).encode('utf-8')
status_hash = dump_hash(status_dump)
bookmark = None
# Writes the data collected in the variables to the database
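# The column list and the VALUES clause both contain 32 entries; the tuple below must stay in the same order as the columns.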
try:
c.execute("INSERT INTO " + table_name + "(tweet_id, date_mined, screen_name, user_id, users_name, \
created_at_UTC, is_retweet, retweeted_times,text, place_name, \
country_code, country, bounding_box, source_tweeted, geo, \
in_reply_to_user, inreply_statusid, posted_image_dest, \
tweeted_image, image_hash, media_type, media_url, media_id, \
posted_video_dest, tweeted_video, video_hash, video_type, \
video_url, url_in_tweet, status, status_hash, bookmark) \
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)" , \
(Tweetid,
now,
screenname,
userid,
usersname,
tweettime,
is_retweet,
retweeted_times,
tweet,
placename,
countrycode,
country,
boundingbox,
Tweet_source,
geo,
inreplytouser,
inreply_tostatus,
posted_image_dest,
tweeted_image,
image_hash,
mediatype,
mediaurl,
mediaid,
posted_video_dest,
tweeted_video,
video_hash,
videotype,
videourl,
url_in_tweet,
str(status),
status_hash,
bookmark))
conn.commit()
print(str(Tweetid), "--- Successfully added to the Database")
except lite.IntegrityError:
print(str(Tweetid), "--- Record already Exists")
if __name__ == '__main__':
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#---------
#---------
#--------- Be sure to enter a unique case name -- This is handled in TwitterMiner_settings now
#---------
#---------
casename = CASE_NAME
dbname = casename + ".db"
conn = lite.connect(dbname)
c = conn.cursor()
screenname = USER_NAME
table_name = USER_NAME + "_Tweets"
create_db(table_name)
get_all_tweets(screenname)
print("\n Finished collecting Tweets from user --- %s" % screenname)
print("Results are stored in " + casename)
#validate_image('T_Tweets') | [
[
[
48,
50
],
[
5987,
5989
],
[
6219,
6221
],
[
6263,
6265
],
[
9114,
9116
],
[
9370,
9372
],
[
9426,
9428
]
],
[
[
58,
73
],
[
15112,
15116
],
[
14606,
14610
]
],
[
[
81,
84
],
[
2742,
2745
],
[
2992,
2995
]
],
[
[
92,
96
],
[
5587,
5591
]
],
[
[
104,
108
],
[
11307,
11311
]
],
[
[
116,
130
],
[
6383,
6389
],
[
6631,
6637
],
[
9583,
9589
],
[
9887,
9893
]
],
[
[
138,
144
],
[
14850,
14856
],
[
2578,
2584
],
[
2764,
2770
]
],
[
[
175,
176
]
],
[
[
196,
208
],
[
14735,
14747
]
],
[
[
243,
244
],
[
14748,
14760
],
[
14762,
14777
],
[
14805,
14817
],
[
14819,
14838
],
[
15051,
15060
],
[
15178,
15187
],
[
15210,
15219
],
[
935,
943
]
],
[
[
252,
259
],
[
407,
414
],
[
553,
560
]
],
[
[
311,
320
],
[
6586,
6595
],
[
9830,
9839
],
[
11447,
11456
]
],
[
[
511,
520
]
],
[
[
770,
788
]
],
[
[
1019,
1028
],
[
15241,
15250
]
],
[
[
2209,
2223
],
[
15272,
15286
]
],
[
[
14728,
14732
],
[
14783,
14787
],
[
14861,
14865
]
],
[
[
14844,
14847
],
[
2511,
2514
],
[
3399,
3402
]
],
[
[
15040,
15048
],
[
15079,
15087
],
[
15414,
15422
],
[
6022,
6030
],
[
6144,
6152
],
[
9149,
9157
],
[
9283,
9291
]
],
[
[
15070,
15076
],
[
15125,
15131
]
],
[
[
15105,
15109
],
[
15142,
15146
],
[
2190,
2194
],
[
14498,
14502
]
],
[
[
15138,
15139
],
[
864,
865
],
[
962,
963
],
[
1051,
1052
],
[
1099,
1100
],
[
11603,
11604
]
],
[
[
15165,
15175
],
[
15287,
15297
],
[
15365,
15375
]
],
[
[
15197,
15207
],
[
15251,
15261
],
[
11630,
11640
]
]
] |
# Generated by Django 3.1.3 on 2021-03-13 11:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
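# swappable_dependency ties this migration to whichever user model AUTH_USER_MODEL points at, instead of hard-coding auth.User.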
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=1000)),
('is_correct', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='online course', max_length=30)),
('image', models.ImageField(upload_to='course_images/')),
('description', models.CharField(max_length=1000)),
('pub_date', models.DateField(null=True)),
('total_enrollment', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Enrollment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_enrolled', models.DateField(default=django.utils.timezone.now)),
('mode', models.CharField(choices=[('audit', 'Audit'), ('honor', 'Honor'), ('BETA', 'BETA')], default='audit', max_length=5)),
('rating', models.FloatField(default=5.0)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choices', models.ManyToManyField(to='onlinecourse.Choice')),
('enrollment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.enrollment')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=1000)),
('grade', models.IntegerField(default=1)),
('course', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='title', max_length=200)),
('order', models.IntegerField(default=0)),
('content', models.TextField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Learner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('occupation', models.CharField(choices=[('student', 'Student'), ('developer', 'Developer'), ('data_scientist', 'Data Scientist'), ('dba', 'Database Admin')], default='student', max_length=20)),
('social_link', models.URLField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Instructor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_time', models.BooleanField(default=True)),
('total_learners', models.IntegerField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='course',
name='instructors',
field=models.ManyToManyField(to='onlinecourse.Instructor'),
),
migrations.AddField(
model_name='course',
name='users',
field=models.ManyToManyField(through='onlinecourse.Enrollment', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.question'),
),
]
| [
[
[
73,
81
],
[
308,
316
],
[
1993,
2001
],
[
4033,
4041
],
[
4502,
4510
],
[
4900,
4908
]
],
[
[
104,
114
],
[
203,
213
],
[
276,
286
],
[
369,
379
],
[
726,
736
],
[
1302,
1312
],
[
2055,
2065
],
[
2485,
2495
],
[
2966,
2976
],
[
3491,
3501
],
[
4095,
4105
],
[
4564,
4574
],
[
4741,
4751
],
[
4946,
4956
]
],
[
[
116,
122
],
[
464,
470
],
[
588,
594
],
[
655,
661
],
[
821,
827
],
[
938,
944
],
[
1023,
1029
],
[
1103,
1109
],
[
1168,
1174
],
[
1235,
1241
],
[
1401,
1407
],
[
1527,
1533
],
[
1606,
1612
],
[
1751,
1757
],
[
1811,
1817
],
[
1927,
1933
],
[
2154,
2160
],
[
2274,
2280
],
[
2356,
2362
],
[
2582,
2588
],
[
2708,
2714
],
[
2770,
2776
],
[
2830,
2836
],
[
3061,
3067
],
[
3179,
3185
],
[
3257,
3263
],
[
3318,
3324
],
[
3366,
3372
],
[
3587,
3593
],
[
3710,
3716
],
[
3922,
3928
],
[
3967,
3973
],
[
4194,
4200
],
[
4316,
4322
],
[
4387,
4393
],
[
4436,
4442
],
[
4668,
4674
],
[
4839,
4845
],
[
5047,
5053
]
],
[
[
130,
155
]
],
[
[
163,
184
],
[
1552,
1558
],
[
1839,
1845
],
[
1955,
1961
],
[
2384,
2390
],
[
2869,
2875
],
[
3394,
3400
],
[
3995,
4001
],
[
4464,
4470
],
[
5075,
5081
]
],
[
[
193,
202
]
]
] |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import date, datetime, time, timedelta
from functools import partial
from io import BytesIO
from itertools import chain, groupby, imap
from math import ceil
from operator import attrgetter, itemgetter
from time import mktime
import dateutil
from dateutil.relativedelta import relativedelta
from flask import Response, flash, jsonify, redirect, request, session
from pytz import utc
from sqlalchemy.orm import joinedload, load_only, subqueryload, undefer, undefer_group
from werkzeug.exceptions import BadRequest, NotFound
from indico.core.db import db
from indico.core.db.sqlalchemy.colors import ColorTuple
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.modules.categories.controllers.base import RHDisplayCategoryBase
from indico.modules.categories.legacy import XMLCategorySerializer
from indico.modules.categories.models.categories import Category
from indico.modules.categories.serialize import (serialize_categories_ical, serialize_category, serialize_category_atom,
serialize_category_chain)
from indico.modules.categories.util import get_category_stats, get_upcoming_events, serialize_event_for_json_ld
from indico.modules.categories.views import WPCategory, WPCategoryCalendar, WPCategoryStatistics
from indico.modules.events.models.events import Event
from indico.modules.events.timetable.util import get_category_timetable
from indico.modules.events.util import get_base_ical_parameters
from indico.modules.news.util import get_recent_news
from indico.modules.users import User
from indico.modules.users.models.favorites import favorite_category_table
from indico.util.date_time import format_date, format_number, now_utc
from indico.util.decorators import classproperty
from indico.util.fs import secure_filename
from indico.util.i18n import _
from indico.util.string import to_unicode
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file, url_for
from indico.web.rh import RH
from indico.web.util import jsonify_data
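# (text, background) colour pairs; _get_event_data assigns one to each category via category_id % len(CALENDAR_COLOR_PALETTE).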
CALENDAR_COLOR_PALETTE = [
ColorTuple('#1F1100', '#ECC495'),
ColorTuple('#0F0202', '#B9CBCA'),
ColorTuple('#0D1E1F', '#C2ECEF'),
ColorTuple('#000000', '#D0C296'),
ColorTuple('#202020', '#EFEBC2')
]
def _flat_map(func, list_):
return chain.from_iterable(imap(func, list_))
class RHCategoryIcon(RHDisplayCategoryBase):
_category_query_options = undefer('icon'),
def _check_access(self):
# Category icons are always public
pass
def _process(self):
if not self.category.has_icon:
raise NotFound
metadata = self.category.icon_metadata
return send_file(metadata['filename'], BytesIO(self.category.icon), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryLogo(RHDisplayCategoryBase):
_category_query_options = undefer('logo'),
def _process(self):
if not self.category.has_logo:
raise NotFound
metadata = self.category.logo_metadata
return send_file(metadata['filename'], BytesIO(self.category.logo), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryStatistics(RHDisplayCategoryBase):
def _get_stats_json(self, stats):
data = {'events': stats['events_by_year'], 'contributions': stats['contribs_by_year'],
'files': stats['attachments'], 'updated': stats['updated'].isoformat()}
if self.category.is_root:
data['users'] = self._count_users()
return jsonify(data)
def _get_stats_html(self, stats):
plots, values, updated = self._process_stats(stats, root=self.category.is_root)
return WPCategoryStatistics.render_template('category_statistics.html', self.category,
plots=plots, values=values, updated=updated, has_stats=True)
def _process(self):
stats = get_category_stats(self.category.id)
if request.accept_mimetypes.best_match(('application/json', 'text/html')) == 'application/json':
return self._get_stats_json(stats)
else:
return self._get_stats_html(stats)
def _plot_data(self, stats, tooltip=''):
years = sorted(stats.iterkeys())
min_year = now_utc().year
max_year = min_year
if years:
min_year = min(min_year, years[0]) - 1
max_year = max(max_year, years[-1])
data = {year: stats.get(year, 0) for year in xrange(min_year, max_year + 1)}
max_y = ceil(max(data.itervalues()) * 1.1) # 1.1 for padding in the graph
else:
data = {}
max_y = 0
return {'min_x': min_year, 'max_x': max_year, 'min_y': 0, 'max_y': max_y, 'values': data,
'total': sum(data.itervalues()), 'label_x': _("Years"), 'label_y': '', 'tooltip': tooltip}
def _process_stats(self, stats, root=False):
# tooltip formatting is for ease of translation
plots = [(_('Number of events'),
_('The year is the one of the start date of the event.'),
self._plot_data(stats.get('events_by_year', {}),
tooltip=_('{value} events in {year}').format(value='', year=''))),
(_('Number of contributions'),
_('The year is the one of the start date of the contribution.'),
self._plot_data(stats.get('contribs_by_year', {}),
tooltip=_('{value} contributions in {year}').format(value='', year='')))]
values = [(_('Number of attachments'), stats['attachments'])]
if root:
values.append((_('Number of users'), self._count_users()))
return plots, values, stats['updated']
def _count_users(self):
return User.find(is_deleted=False, is_pending=False).count()
class RHCategoryInfo(RHDisplayCategoryBase):
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = subqueryload('children')
children_strategy.load_only('id', 'parent_id', 'title', 'protection_mode', 'event_creation_restricted')
children_strategy.subqueryload('acl_entries')
children_strategy.undefer('deep_children_count')
children_strategy.undefer('deep_events_count')
children_strategy.undefer('has_events')
return (children_strategy,
load_only('id', 'parent_id', 'title', 'protection_mode'),
subqueryload('acl_entries'),
undefer('deep_children_count'),
undefer('deep_events_count'),
undefer('has_events'),
undefer('chain'))
def _process(self):
return jsonify_data(flash=False,
**serialize_category_chain(self.category, include_children=True, include_parents=True))
class RHReachableCategoriesInfo(RH):
def _get_reachable_categories(self, id_, excluded_ids):
cat = Category.query.filter_by(id=id_).options(joinedload('children').load_only('id')).one()
ids = ({c.id for c in cat.children} | {c.id for c in cat.parent_chain_query}) - excluded_ids
if not ids:
return []
return (Category.query
.filter(Category.id.in_(ids))
.options(*RHCategoryInfo._category_query_options)
.all())
def _process(self):
excluded_ids = set(request.json.get('exclude', set())) if request.json else set()
categories = self._get_reachable_categories(request.view_args['category_id'], excluded_ids=excluded_ids)
return jsonify_data(categories=[serialize_category_chain(c, include_children=True) for c in categories],
flash=False)
class RHCategorySearch(RH):
def _process(self):
q = request.args['q'].lower()
query = (Category.query
.filter(Category.title_matches(q))
.options(undefer('deep_children_count'), undefer('deep_events_count'), undefer('has_events'),
joinedload('acl_entries')))
if session.user:
# Prefer favorite categories
query = query.order_by(Category.favorite_of.any(favorite_category_table.c.user_id == session.user.id)
.desc())
# Prefer exact matches and matches at the beginning, then order by category title and if
# those are identical by the chain titles
query = (query
.order_by((db.func.lower(Category.title) == q).desc(),
db.func.lower(Category.title).startswith(q).desc(),
db.func.lower(Category.title),
Category.chain_titles))
total_count = query.count()
query = query.limit(10)
return jsonify_data(categories=[serialize_category(c, with_favorite=True, with_path=True) for c in query],
total_count=total_count, flash=False)
class RHSubcatInfo(RHDisplayCategoryBase):
"""Get basic information about subcategories.
This is intended to return information shown on a category display
page that is not needed immediately and is somewhat expensive to
retrieve.
"""
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = joinedload('children')
children_strategy.load_only('id')
children_strategy.undefer('deep_events_count')
return children_strategy, load_only('id', 'parent_id', 'protection_mode')
def _process(self):
event_counts = {c.id: {'value': c.deep_events_count, 'pretty': format_number(c.deep_events_count)}
for c in self.category.children}
return jsonify_data(flash=False, event_counts=event_counts)
class RHDisplayCategoryEventsBase(RHDisplayCategoryBase):
"""Base class for display pages displaying an event list"""
_category_query_options = (joinedload('children').load_only('id', 'title', 'protection_mode'),
undefer('attachment_count'), undefer('has_events'))
_event_query_options = (joinedload('person_links'), joinedload('series'), undefer_group('series'),
load_only('id', 'category_id', 'created_dt', 'start_dt', 'end_dt', 'timezone',
'protection_mode', 'title', 'type_', 'series_pos', 'series_count',
'own_address', 'own_venue_id', 'own_venue_name'))
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.now = now_utc(exact=False).astimezone(self.category.display_tzinfo)
def format_event_date(self, event):
day_month = 'dd MMM'
tzinfo = self.category.display_tzinfo
start_dt = event.start_dt.astimezone(tzinfo)
end_dt = event.end_dt.astimezone(tzinfo)
if start_dt.year != end_dt.year:
return '{} - {}'.format(to_unicode(format_date(start_dt, timezone=tzinfo)),
to_unicode(format_date(end_dt, timezone=tzinfo)))
elif (start_dt.month != end_dt.month) or (start_dt.day != end_dt.day):
return '{} - {}'.format(to_unicode(format_date(start_dt, day_month, timezone=tzinfo)),
to_unicode(format_date(end_dt, day_month, timezone=tzinfo)))
else:
return to_unicode(format_date(start_dt, day_month, timezone=tzinfo))
def group_by_month(self, events):
def _format_tuple(x):
(year, month), events = x
return {'name': format_date(date(year, month, 1), format='MMMM yyyy'),
'events': list(events),
'is_current': year == self.now.year and month == self.now.month}
def _key(event):
start_dt = event.start_dt.astimezone(self.category.tzinfo)
return start_dt.year, start_dt.month
months = groupby(events, key=_key)
return map(_format_tuple, months)
def happening_now(self, event):
return event.start_dt <= self.now < event.end_dt
def is_recent(self, dt):
return dt > self.now - relativedelta(weeks=1)
class RHDisplayCategory(RHDisplayCategoryEventsBase):
"""Show the contents of a category (events/subcategories)"""
def _process(self):
# Current events, which are always shown by default, are the events of this month and of the previous month.
# If there are no events in this range, it will include the last and next month containing events.
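# For example, with self.now == 2019-05-17 14:30, past_threshold becomes 2019-04-01 00:00 and future_threshold 2019-06-01 00:00.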
past_threshold = self.now - relativedelta(months=1, day=1, hour=0, minute=0)
future_threshold = self.now + relativedelta(months=1, day=1, hour=0, minute=0)
next_event_start_dt = (db.session.query(Event.start_dt)
.filter(Event.start_dt >= self.now, Event.category_id == self.category.id)
.order_by(Event.start_dt.asc(), Event.id.asc())
.first() or (None,))[0]
previous_event_start_dt = (db.session.query(Event.start_dt)
.filter(Event.start_dt < self.now, Event.category_id == self.category.id)
.order_by(Event.start_dt.desc(), Event.id.desc())
.first() or (None,))[0]
if next_event_start_dt is not None and next_event_start_dt > future_threshold:
future_threshold = next_event_start_dt + relativedelta(months=1, day=1, hour=0, minute=0)
if previous_event_start_dt is not None and previous_event_start_dt < past_threshold:
past_threshold = previous_event_start_dt.replace(day=1, hour=0, minute=0)
event_query = (Event.query.with_parent(self.category)
.options(*self._event_query_options)
.order_by(Event.start_dt.desc(), Event.id.desc()))
past_event_query = event_query.filter(Event.start_dt < past_threshold)
future_event_query = event_query.filter(Event.start_dt >= future_threshold)
current_event_query = event_query.filter(Event.start_dt >= past_threshold,
Event.start_dt < future_threshold)
json_ld_events = events = current_event_query.filter(Event.start_dt < future_threshold).all()
events_by_month = self.group_by_month(events)
future_event_count = future_event_query.count()
past_event_count = past_event_query.count()
if not session.user and future_event_count:
json_ld_events = json_ld_events + future_event_query.all()
show_future_events = bool(self.category.id in session.get('fetch_future_events_in', set()) or
(session.user and session.user.settings.get('show_future_events', False)))
show_past_events = bool(self.category.id in session.get('fetch_past_events_in', set()) or
(session.user and session.user.settings.get('show_past_events', False)))
managers = sorted(self.category.get_manager_list(), key=attrgetter('principal_type.name', 'name'))
threshold_format = '%Y-%m'
params = {'event_count': len(events),
'events_by_month': events_by_month,
'format_event_date': self.format_event_date,
'future_event_count': future_event_count,
'show_future_events': show_future_events,
'future_threshold': future_threshold.strftime(threshold_format),
'happening_now': self.happening_now,
'is_recent': self.is_recent,
'managers': managers,
'past_event_count': past_event_count,
'show_past_events': show_past_events,
'past_threshold': past_threshold.strftime(threshold_format),
'json_ld': map(serialize_event_for_json_ld, json_ld_events),
'atom_feed_url': url_for('.export_atom', self.category),
'atom_feed_title': _('Events of "{}"').format(self.category.title)}
params.update(get_base_ical_parameters(session.user, 'category',
'/export/categ/{0}.ics'.format(self.category.id), {'from': '-31d'}))
if not self.category.is_root:
return WPCategory.render_template('display/category.html', self.category, **params)
news = get_recent_news()
upcoming_events = get_upcoming_events()
return WPCategory.render_template('display/root_category.html', self.category, news=news,
upcoming_events=upcoming_events, **params)
class RHEventList(RHDisplayCategoryEventsBase):
"""Return the HTML for the event list before/after a specific month"""
def _parse_year_month(self, string):
try:
dt = datetime.strptime(string, '%Y-%m')
except (TypeError, ValueError):
return None
return self.category.display_tzinfo.localize(dt)
def _process_args(self):
RHDisplayCategoryEventsBase._process_args(self)
before = self._parse_year_month(request.args.get('before'))
after = self._parse_year_month(request.args.get('after'))
if before is None and after is None:
raise BadRequest('"before" or "after" parameter must be specified')
event_query = (Event.query.with_parent(self.category)
.options(*self._event_query_options)
.order_by(Event.start_dt.desc(), Event.id.desc()))
if before:
event_query = event_query.filter(Event.start_dt < before)
if after:
event_query = event_query.filter(Event.start_dt >= after)
self.events = event_query.all()
def _process(self):
events_by_month = self.group_by_month(self.events)
tpl = get_template_module('categories/display/event_list.html')
html = tpl.event_list_block(events_by_month=events_by_month, format_event_date=self.format_event_date,
is_recent=self.is_recent, happening_now=self.happening_now)
return jsonify_data(flash=False, html=html)
class RHShowEventsInCategoryBase(RHDisplayCategoryBase):
"""Set whether the events in a category are automatically displayed or not"""
session_field = ''
def _show_events(self, show_events):
category_ids = session.setdefault(self.session_field, set())
if show_events:
category_ids.add(self.category.id)
else:
category_ids.discard(self.category.id)
session.modified = True
def _process_DELETE(self):
self._show_events(False)
def _process_PUT(self):
self._show_events(True)
class RHShowFutureEventsInCategory(RHShowEventsInCategoryBase):
"""Set whether the past events in a category are automatically displayed or not"""
session_field = 'fetch_future_events_in'
class RHShowPastEventsInCategory(RHShowEventsInCategoryBase):
"""Set whether the past events in a category are automatically displayed or not"""
session_field = 'fetch_past_events_in'
class RHExportCategoryICAL(RHDisplayCategoryBase):
def _process(self):
filename = '{}-category.ics'.format(secure_filename(self.category.title, str(self.category.id)))
buf = serialize_categories_ical([self.category.id], session.user,
Event.end_dt >= (now_utc() - timedelta(weeks=4)))
return send_file(filename, buf, 'text/calendar')
class RHExportCategoryAtom(RHDisplayCategoryBase):
def _process(self):
filename = '{}-category.atom'.format(secure_filename(self.category.title, str(self.category.id)))
buf = serialize_category_atom(self.category,
url_for(request.endpoint, self.category, _external=True),
session.user,
Event.end_dt >= now_utc())
return send_file(filename, buf, 'application/atom+xml')
class RHXMLExportCategoryInfo(RH):
def _process_args(self):
try:
id_ = int(request.args['id'])
except ValueError:
raise BadRequest('Invalid Category ID')
self.category = Category.get_one(id_, is_deleted=False)
def _process(self):
category_xml_info = XMLCategorySerializer(self.category).serialize_category()
return Response(category_xml_info, mimetype='text/xml')
class RHCategoryOverview(RHDisplayCategoryBase):
"""Display the events for a particular day, week or month"""
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.detail = request.args.get('detail', 'event')
if self.detail not in ('event', 'session', 'contribution'):
raise BadRequest('Invalid detail argument')
self.period = request.args.get('period', 'day')
if self.period not in ('day', 'month', 'week'):
raise BadRequest('Invalid period argument')
if 'date' in request.args:
try:
date = datetime.strptime(request.args['date'], '%Y-%m-%d')
except ValueError:
raise BadRequest('Invalid date argument')
else:
date = datetime.now()
date = self.category.display_tzinfo.localize(date)
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
if self.period == 'day':
self.start_dt = date
self.end_dt = self.start_dt + relativedelta(days=1)
elif self.period == 'week':
self.start_dt = date - relativedelta(days=date.weekday())
self.end_dt = self.start_dt + relativedelta(days=7)
elif self.period == 'month':
self.start_dt = date + relativedelta(day=1)
self.end_dt = self.start_dt + relativedelta(months=1)
def _process(self):
info = get_category_timetable([self.category.id], self.start_dt, self.end_dt, detail_level=self.detail,
tz=self.category.display_tzinfo, from_categ=self.category, grouped=False)
events = info['events']
# Only categories with icons are listed in the sidebar
subcategory_ids = {event.category.effective_icon_data['source_id']
for event in events if event.category.has_effective_icon}
subcategories = Category.query.filter(Category.id.in_(subcategory_ids))
# Events spanning multiple days must appear on all days
events = _flat_map(partial(self._process_multiday_events, info), events)
def _event_sort_key(event):
# Ongoing events are shown after all other events on the same day and are sorted by start_date
ongoing = getattr(event, 'ongoing', False)
return (event.start_dt.date(), ongoing,
-mktime(event.first_occurence_start_dt.timetuple()) if ongoing else event.start_dt.time())
events = sorted(events, key=_event_sort_key)
params = {
'detail': self.detail,
'period': self.period,
'subcategories': subcategories,
'start_dt': self.start_dt,
'end_dt': self.end_dt - relativedelta(days=1), # Display a close-ended interval
'previous_day_url': self._other_day_url(self.start_dt - relativedelta(days=1)),
'next_day_url': self._other_day_url(self.start_dt + relativedelta(days=1)),
'previous_month_url': self._other_day_url(self.start_dt - relativedelta(months=1)),
'next_month_url': self._other_day_url(self.start_dt + relativedelta(months=1)),
'previous_year_url': self._other_day_url(self.start_dt - relativedelta(years=1)),
'next_year_url': self._other_day_url(self.start_dt + relativedelta(years=1)),
'mathjax': True
}
if self.detail != 'event':
cte = self.category.get_protection_parent_cte()
params['accessible_categories'] = {cat_id
for cat_id, prot_parent_id in db.session.query(cte)
if prot_parent_id == self.category.id}
if self.period == 'day':
return WPCategory.render_template('display/overview/day.html', self.category, events=events, **params)
elif self.period == 'week':
days = self._get_week_days()
template = 'display/overview/week.html'
params['previous_week_url'] = self._other_day_url(self.start_dt - relativedelta(days=7))
params['next_week_url'] = self._other_day_url(self.start_dt + relativedelta(days=7))
elif self.period == 'month':
days = self._get_calendar_days()
template = 'display/overview/month.html'
events_by_day = []
for day in days:
events_by_day.append((day, self._pop_head_while(lambda x: x.start_dt.date() <= day.date(), events)))
# Check whether all weekends are empty
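# The day grid starts on a Monday, so the slices [5::7] and [6::7] are the Saturdays and Sundays respectively.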
hide_weekend = (not any(map(itemgetter(1), events_by_day[5::7])) and
not any(map(itemgetter(1), events_by_day[6::7])))
if hide_weekend:
events_by_day = [x for x in events_by_day if x[0].weekday() not in (5, 6)]
return WPCategory.render_template(template, self.category, events_by_day=events_by_day,
hide_weekend=hide_weekend, **params)
def _get_week_days(self):
# Return the days shown in the weekly overview
return self._get_days(self.start_dt, self.end_dt)
def _get_calendar_days(self):
# Return the days shown in the monthly overview
start_dt = self.start_dt - relativedelta(days=self.start_dt.weekday())
end_dt = self.end_dt + relativedelta(days=(7 - self.end_dt.weekday()) % 7)
return self._get_days(start_dt, end_dt)
@staticmethod
def _get_days(start_dt, end_dt):
# Return all days in the half-open interval [start_dt, end_dt)
current_dt = start_dt
tz = current_dt.tzinfo
next_day = current_dt.date() + timedelta(1)
beginning_of_next_day = tz.localize(datetime.combine(next_day, time()))
while current_dt < end_dt:
yield current_dt
current_dt = beginning_of_next_day
beginning_of_next_day = current_dt + relativedelta(days=1)
@staticmethod
def _pop_head_while(predicate, list_):
# Pop the head of the list while the predicate is true and return the popped elements
res = []
while len(list_) and predicate(list_[0]):
res.append(list_[0])
list_.pop(0)
return res
def _other_day_url(self, date):
return url_for('.overview', self.category, detail=self.detail, period=self.period,
date=format_date(date, 'yyyy-MM-dd'))
def _process_multiday_events(self, info, event):
# Add "fake" proxy events for events spanning multiple days such that there is one event per day
# Function type: Event -> List[Event]
tzinfo = self.category.display_tzinfo
# Breaks, contributions and sessions grouped by start_dt. Each EventProxy will return the relevant ones only
timetable_objects = sorted(chain(*info[event.id].values()), key=attrgetter('timetable_entry.start_dt'))
timetable_objects_by_date = {x[0]: list(x[1]) for x
in groupby(timetable_objects, key=lambda x: x.start_dt.astimezone(tzinfo).date())}
# All the days of the event shown in the overview
event_days = self._get_days(max(self.start_dt, event.start_dt.astimezone(tzinfo)),
min(self.end_dt, event.end_dt.astimezone(tzinfo)))
# Generate a proxy object with adjusted start_dt and timetable_objects for each day
return [_EventProxy(event, day, tzinfo, timetable_objects_by_date.get(day.date(), [])) for day in event_days]
class _EventProxy(object):
def __init__(self, event, date, tzinfo, timetable_objects):
start_dt = datetime.combine(date, event.start_dt.astimezone(tzinfo).timetz())
assert date >= event.start_dt
assert date <= event.end_dt
object.__setattr__(self, '_start_dt', start_dt)
object.__setattr__(self, '_real_event', event)
object.__setattr__(self, '_event_tz_start_date', event.start_dt.astimezone(tzinfo).date())
object.__setattr__(self, '_timetable_objects', timetable_objects)
def __getattribute__(self, name):
if name == 'start_dt':
return object.__getattribute__(self, '_start_dt')
event = object.__getattribute__(self, '_real_event')
if name == 'timetable_objects':
return object.__getattribute__(self, '_timetable_objects')
if name == 'ongoing':
# the event is "ongoing" if the dates (in the tz of the category)
# of the event and the proxy (calendar entry) don't match
event_start_date = object.__getattribute__(self, '_event_tz_start_date')
return event_start_date != self.start_dt.date()
if name == 'first_occurence_start_dt':
return event.start_dt
return getattr(event, name)
def __setattr__(self, name, value):
raise AttributeError('This instance is read-only')
def __repr__(self):
return '<_EventProxy({}, {})>'.format(self.start_dt, object.__getattribute__(self, '_real_event'))
class RHCategoryCalendarView(RHDisplayCategoryBase):
def _process(self):
if not request.is_xhr:
return WPCategoryCalendar.render_template('display/calendar.html', self.category,
start_dt=request.args.get('start_dt'))
tz = self.category.display_tzinfo
start = tz.localize(dateutil.parser.parse(request.args['start'])).astimezone(utc)
end = tz.localize(dateutil.parser.parse(request.args['end'])).astimezone(utc)
query = (Event.query
.filter(Event.starts_between(start, end),
Event.is_visible_in(self.category.id),
~Event.is_deleted)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'category_id')))
events = self._get_event_data(query)
ongoing_events = (Event.query
.filter(Event.is_visible_in(self.category.id),
Event.start_dt < start,
Event.end_dt > end)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'timezone'))
.order_by(Event.title)
.all())
return jsonify_data(flash=False, events=events, ongoing_event_count=len(ongoing_events),
ongoing_events_html=self._render_ongoing_events(ongoing_events))
def _get_event_data(self, event_query):
data = []
tz = self.category.display_tzinfo
for event in event_query:
category_id = event.category_id
event_data = {'title': event.title,
'start': event.start_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'end': event.end_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'url': event.url}
colors = CALENDAR_COLOR_PALETTE[category_id % len(CALENDAR_COLOR_PALETTE)]
event_data.update({'textColor': '#' + colors.text, 'color': '#' + colors.background})
data.append(event_data)
return data
def _render_ongoing_events(self, ongoing_events):
template = get_template_module('categories/display/_calendar_ongoing_events.html')
return template.render_ongoing_events(ongoing_events, self.category.display_tzinfo)
class RHCategoryUpcomingEvent(RHDisplayCategoryBase):
"""Redirect to the upcoming event of a category."""
def _process(self):
event = self._get_upcoming_event()
if event:
return redirect(event.url)
else:
flash(_('There are no upcoming events for this category'))
return redirect(self.category.url)
def _get_upcoming_event(self):
query = (Event.query
.filter(Event.is_visible_in(self.category.id),
Event.start_dt > now_utc(),
~Event.is_deleted)
.options(subqueryload('acl_entries'))
.order_by(Event.start_dt, Event.id))
res = get_n_matching(query, 1, lambda event: event.can_access(session.user))
if res:
return res[0]
| [
[
[
237,
253
]
],
[
[
276,
280
],
[
11988,
11992
]
],
[
[
282,
290
],
[
17299,
17307
],
[
21568,
21576
],
[
21742,
21750
],
[
26658,
26666
],
[
28586,
28594
]
],
[
[
292,
296
],
[
26685,
26689
]
],
[
[
298,
307
],
[
19917,
19926
],
[
26601,
26610
]
],
[
[
330,
337
],
[
23023,
23030
]
],
[
[
353,
360
],
[
2997,
3004
],
[
3383,
3390
]
],
[
[
383,
388
],
[
2593,
2598
],
[
27769,
27774
]
],
[
[
390,
397
],
[
12324,
12331
],
[
27946,
27953
]
],
[
[
399,
403
],
[
2613,
2617
]
],
[
[
421,
425
],
[
4876,
4880
]
],
[
[
447,
457
],
[
15495,
15505
],
[
27806,
27816
]
],
[
[
459,
469
],
[
25544,
25554
],
[
25621,
25631
]
],
[
[
487,
493
],
[
23349,
23355
]
],
[
[
502,
510
],
[
30354,
30362
],
[
30442,
30450
]
],
[
[
546,
559
],
[
12547,
12560
],
[
12971,
12984
],
[
13058,
13071
],
[
13872,
13885
],
[
21995,
22008
],
[
22088,
22101
],
[
22165,
22178
],
[
22259,
22272
],
[
22322,
22335
],
[
23701,
23714
],
[
23826,
23839
],
[
23914,
23927
],
[
24008,
24021
],
[
24100,
24113
],
[
24195,
24208
],
[
24285,
24298
],
[
25039,
25052
],
[
25136,
25149
],
[
26217,
26230
],
[
26292,
26305
],
[
26854,
26867
]
],
[
[
578,
586
],
[
20898,
20906
]
],
[
[
588,
593
],
[
32635,
32640
]
],
[
[
595,
602
],
[
3861,
3868
]
],
[
[
604,
612
],
[
32589,
32597
],
[
32713,
32721
]
],
[
[
614,
621
],
[
4299,
4306
],
[
7812,
7819
],
[
7773,
7780
],
[
7888,
7895
],
[
8169,
8176
],
[
17581,
17588
],
[
17648,
17655
],
[
20277,
20284
],
[
20609,
20616
],
[
21165,
21172
],
[
21347,
21354
],
[
21514,
21521
],
[
21586,
21593
],
[
30081,
30088
],
[
30254,
30261
],
[
30376,
30383
],
[
30464,
30471
]
],
[
[
623,
630
],
[
8455,
8462
],
[
8607,
8614
],
[
14907,
14914
],
[
15070,
15077
],
[
15153,
15160
],
[
15170,
15177
],
[
15279,
15286
],
[
15358,
15365
],
[
15375,
15382
],
[
16560,
16567
],
[
18858,
18865
],
[
19048,
19055
],
[
19834,
19841
],
[
20365,
20372
],
[
33146,
33153
]
],
[
[
648,
651
],
[
30411,
30414
],
[
30497,
30500
]
],
[
[
679,
689
],
[
10323,
10333
],
[
10502,
10512
],
[
10530,
10540
],
[
7365,
7375
],
[
8416,
8426
],
[
9708,
9718
]
],
[
[
691,
700
],
[
10605,
10614
],
[
6759,
6768
],
[
9862,
9871
],
[
30724,
30733
],
[
31091,
31100
]
],
[
[
702,
714
],
[
6357,
6369
],
[
6833,
6845
],
[
32993,
33005
]
],
[
[
716,
723
],
[
2709,
2716
],
[
3181,
3188
],
[
10422,
10429
],
[
10451,
10458
],
[
6878,
6885
],
[
6926,
6933
],
[
6972,
6979
],
[
7011,
7018
],
[
8305,
8312
],
[
8337,
8344
],
[
8367,
8374
]
],
[
[
725,
738
],
[
10552,
10565
]
],
[
[
771,
781
],
[
17738,
17748
],
[
20674,
20684
],
[
21287,
21297
],
[
21455,
21465
],
[
21673,
21683
]
],
[
[
783,
791
],
[
2894,
2902
],
[
3280,
3288
]
],
[
[
820,
822
],
[
8866,
8868
],
[
8937,
8939
],
[
9016,
9018
],
[
13138,
13140
],
[
13446,
13448
],
[
24575,
24577
]
],
[
[
868,
878
],
[
2365,
2375
],
[
2403,
2413
],
[
2441,
2451
],
[
2479,
2489
],
[
2517,
2527
]
],
[
[
930,
944
],
[
33090,
33104
]
],
[
[
1000,
1021
],
[
2655,
2676
],
[
3127,
3148
],
[
3519,
3540
],
[
6231,
6252
],
[
9368,
9389
],
[
10203,
10224
],
[
18663,
18684
],
[
19621,
19642
],
[
20024,
20045
],
[
20974,
20995
],
[
30018,
30039
],
[
32404,
32425
],
[
10915,
10936
],
[
21101,
21122
]
],
[
[
1067,
1088
],
[
20825,
20846
]
],
[
[
1145,
1153
],
[
7324,
7332
],
[
7570,
7578
],
[
7609,
7617
],
[
8212,
8220
],
[
8252,
8260
],
[
8545,
8553
],
[
8880,
8888
],
[
8951,
8959
],
[
9030,
9038
],
[
9074,
9082
],
[
20732,
20740
],
[
22875,
22883
],
[
22897,
22905
]
],
[
[
1203,
1228
],
[
19788,
19813
]
],
[
[
1230,
1248
],
[
9206,
9224
]
],
[
[
1250,
1273
],
[
20192,
20215
]
],
[
[
1324,
1348
],
[
7125,
7149
],
[
7989,
8013
]
],
[
[
1393,
1411
],
[
4251,
4269
]
],
[
[
1413,
1432
],
[
16897,
16916
]
],
[
[
1434,
1461
],
[
16306,
16333
]
],
[
[
1506,
1516
],
[
16760,
16770
],
[
16934,
16944
],
[
24736,
24746
],
[
25787,
25797
]
],
[
[
1518,
1536
],
[
30116,
30134
]
],
[
[
1538,
1558
],
[
4017,
4037
]
],
[
[
1607,
1612
],
[
13155,
13160
],
[
13210,
13215
],
[
13238,
13243
],
[
13318,
13323
],
[
13340,
13345
],
[
13463,
13468
],
[
13522,
13527
],
[
13549,
13554
],
[
13633,
13638
],
[
13656,
13661
],
[
14123,
14128
],
[
14255,
14260
],
[
14278,
14283
],
[
14342,
14347
],
[
14423,
14428
],
[
14508,
14513
],
[
14591,
14596
],
[
14687,
14692
],
[
17823,
17828
],
[
17955,
17960
],
[
17978,
17983
],
[
18060,
18065
],
[
18148,
18153
],
[
19888,
19893
],
[
20417,
20422
],
[
30519,
30524
],
[
30556,
30561
],
[
30615,
30620
],
[
30680,
30685
],
[
30859,
30864
],
[
30905,
30910
],
[
30978,
30983
],
[
31036,
31041
],
[
31187,
31192
],
[
32794,
32799
],
[
32831,
32836
],
[
32895,
32900
],
[
32949,
32954
],
[
33049,
33054
],
[
33065,
33070
]
],
[
[
1662,
1684
],
[
22386,
22408
]
],
[
[
1724,
1748
],
[
16535,
16559
]
],
[
[
1786,
1801
],
[
16853,
16868
]
],
[
[
1835,
1839
],
[
6154,
6158
]
],
[
[
1890,
1913
],
[
8570,
8593
]
],
[
[
1948,
1959
],
[
11344,
11355
],
[
11432,
11443
],
[
11597,
11608
],
[
11696,
11707
],
[
11790,
11801
],
[
27332,
27343
],
[
11976,
11987
]
],
[
[
1961,
1974
],
[
10006,
10019
]
],
[
[
1976,
1983
],
[
4607,
4614
],
[
10976,
10983
],
[
19905,
19912
],
[
20433,
20440
],
[
32912,
32919
]
],
[
[
2019,
2032
],
[
6260,
6273
],
[
9611,
9624
]
],
[
[
2060,
2075
],
[
19713,
19728
],
[
20117,
20132
]
],
[
[
2105,
2106
],
[
5159,
5160
],
[
5330,
5331
],
[
5371,
5372
],
[
5538,
5539
],
[
5615,
5616
],
[
5663,
5664
],
[
5839,
5840
],
[
5924,
5925
],
[
6019,
6020
],
[
16464,
16465
],
[
32641,
32642
]
],
[
[
2138,
2148
],
[
11333,
11343
],
[
11421,
11431
],
[
11586,
11596
],
[
11685,
11695
],
[
11779,
11789
]
],
[
[
2189,
2208
],
[
18311,
18330
],
[
32208,
32227
]
],
[
[
2243,
2252
],
[
2965,
2974
],
[
3351,
3360
],
[
19953,
19962
],
[
20459,
20468
]
],
[
[
2254,
2261
],
[
16387,
16394
],
[
20269,
20276
],
[
27228,
27235
]
],
[
[
2288,
2290
],
[
7245,
7247
],
[
8128,
8130
],
[
20540,
20542
]
],
[
[
2319,
2331
],
[
7069,
7081
],
[
7964,
7976
],
[
9181,
9193
],
[
10114,
10126
],
[
18591,
18603
],
[
31249,
31261
]
],
[
[
2334,
2356
],
[
31914,
31936
],
[
31955,
31977
]
],
[
[
2558,
2567
],
[
23013,
23022
]
],
[
[
2640,
2654
]
],
[
[
3112,
3126
]
],
[
[
3498,
3518
]
],
[
[
6216,
6230
],
[
7657,
7671
]
],
[
[
7219,
7244
]
],
[
[
8111,
8127
]
],
[
[
9355,
9367
]
],
[
[
10175,
10202
],
[
12596,
12623
],
[
17122,
17149
],
[
17493,
17520
]
],
[
[
12578,
12595
]
],
[
[
17110,
17121
]
],
[
[
18636,
18662
],
[
19235,
19261
],
[
19432,
19458
]
],
[
[
19206,
19234
]
],
[
[
19405,
19431
]
],
[
[
19600,
19620
]
],
[
[
20003,
20023
]
],
[
[
20516,
20539
]
],
[
[
20955,
20973
]
],
[
[
28482,
28493
],
[
28372,
28383
]
],
[
[
29995,
30017
]
],
[
[
32380,
32403
]
]
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic operator strategy."""
# pylint: disable=invalid-name,unused-argument
import logging
import re
from tvm import topi, _ffi, te, ir
from tvm.topi.utils import get_const_int, get_const_float, get_const_tuple, get_float_tuple
from tvm.target import generic_func, override_native_generic_func
from .. import op as _op
logger = logging.getLogger("strategy")
def naive_schedule(_, outs, target):
"""Return the naive default schedule"""
if "gpu" in target.keys:
# For GPU, we at least need thread binding to make a valid schedule.
# So the naive schedule cannot be compiled.
raise RuntimeError(
"Cannot compile for GPU targets if no tuned schedule is found. "
"Please see the warning messages above for more information about the failed workloads."
)
return te.create_schedule(outs[-1].op)
def wrap_topi_schedule(topi_schedule):
"""Wrap TOPI schedule which doesn't use attrs"""
def wrapper(attrs, outs, target):
with target:
return topi_schedule(outs)
return wrapper
def get_conv2d_in_channels(data_shape, data_layout):
"""Get conv2d input channels"""
data_shape = get_const_tuple(data_shape)
if len(data_shape) == 4:
idx = data_layout.find("C")
assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout)
return data_shape[idx]
if re.match(r"NCHW\d*c", data_layout):
# NCHW[8]c
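# e.g. an NCHW8c tensor of shape (1, 16, 56, 56, 8) has 16 * 8 = 128 input channels (illustrative shape).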
return data_shape[1] * data_shape[4]
raise ValueError("Unknown conv2d data layout {}".format(data_layout))
def get_conv2d_out_channels(kernel_shape, kernel_layout):
"""Get conv2d output channels"""
kernel_shape = get_const_tuple(kernel_shape)
if len(kernel_shape) == 4:
idx = kernel_layout.find("O")
assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout)
return kernel_shape[idx]
if re.match(r"OIHW\d*i\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[5]
if re.match(r"OIHW\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[4]
raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups):
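"""Return True when groups equals both the input and the output channel count, i.e. the conv2d is depthwise."""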
ic = get_conv2d_in_channels(data_shape, data_layout)
oc = get_conv2d_out_channels(kernel_shape, kernel_layout)
return ic == oc == groups
@generic_func
def schedule_injective(attrs, outs, target):
"""Schedule injective ops"""
with target:
return topi.generic.schedule_injective(outs)
@generic_func
def schedule_reduce(attrs, outs, target):
"""Schedule reduction ops"""
with target:
return topi.generic.schedule_reduce(outs)
_op._schedule_injective = schedule_injective
_op._schedule_reduce = schedule_reduce
# concatenate
@generic_func
def schedule_concatenate(attrs, outs, target):
"""Schedule concatenate op"""
with target:
return topi.generic.schedule_injective(outs)
# pool
@generic_func
def schedule_pool(attrs, outs, target):
"""Schedule pooling ops"""
with target:
return topi.generic.schedule_pool(outs, attrs.layout)
# pool_grad
@generic_func
def schedule_pool_grad(attrs, outs, target):
"""Schedule pooling gradient ops"""
with target:
return topi.generic.schedule_pool_grad(outs)
# adaptive pool
@generic_func
def schedule_adaptive_pool(attrs, outs, target):
"""Schedule adaptive pooling ops"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
# softmax
def wrap_compute_softmax(topi_compute):
"""Wrap softmax topi compute"""
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax
@override_native_generic_func("softmax_strategy")
def softmax_strategy(attrs, inputs, out_type, target):
"""softmax generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="softmax.generic",
)
return strategy
# log_softmax
@generic_func
def schedule_log_softmax(attrs, outs, target):
"""Schedule log_softmax op"""
with target:
return topi.generic.schedule_softmax(outs)
# lrn
@generic_func
def schedule_lrn(attrs, outs, target):
"""Schedule LRN op"""
with target:
return topi.generic.schedule_lrn(outs)
# bitpack
@generic_func
def schedule_bitpack(attrs, outs, target):
"""Schedule bitpack"""
with target:
return topi.generic.schedule_bitpack(outs)
get_auto_scheduler_rewritten_layout = _ffi.get_global_func(
"relay.attrs.get_auto_scheduler_rewritten_layout"
)
# conv2d
def wrap_compute_conv2d(
topi_compute,
need_data_layout=False,
need_out_layout=False,
has_groups=False,
need_auto_scheduler_layout=False,
):
"""Wrap conv2d topi compute"""
def _compute_conv2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.out_dtype
auto_scheduler_rewritten_layout = get_auto_scheduler_rewritten_layout(attrs)
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilation]
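# Optional arguments must be appended in the exact order the wrapped TOPI compute expects:
# groups, data_layout, out_layout, out_dtype, and finally the auto-scheduler rewritten layout.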
if has_groups:
args.append(attrs.groups)
if need_data_layout:
args.append(data_layout)
if need_out_layout:
args.append(out_layout)
args.append(out_dtype)
if need_auto_scheduler_layout:
args.append(auto_scheduler_rewritten_layout)
return [topi_compute(*args)]
return _compute_conv2d
@override_native_generic_func("conv2d_strategy")
def conv2d_strategy(attrs, inputs, out_type, target):
"""conv2d generic strategy"""
logger.warning("conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
data, kernel = inputs
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_nchw),
name="conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc),
name="conv2d_nhwc.generic",
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic",
)
else:
raise RuntimeError("Unsupported conv2d layout {}".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
name="group_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
# conv2d_NCHWc
@override_native_generic_func("conv2d_NCHWc_strategy")
def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""conv2d_NCHWc generic strategy"""
logger.warning("conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc_int8, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.generic",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.generic",
)
return strategy
# depthwise_conv2d_NCHWc
@override_native_generic_func("depthwise_conv2d_NCHWc_strategy")
def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""depthwise_conv2d generic strategy"""
logger.warning("depthwise_conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.generic",
)
return strategy
# conv2d_winograd_without_weight_transform
@override_native_generic_func("conv2d_winograd_without_weight_transform_strategy")
def conv2d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv2d_winograd_without_weight_transform")
# conv2d_gemm_without_weight_transform
@override_native_generic_func("conv2d_gemm_without_weight_transform_strategy")
def conv2d_gemm_without_weight_transform_strategy(attrs, inputs, out_type, target):
"""conv2d_gemm_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv2d_gemm_without_weight_transform")
# conv2d_winograd_weight_transform
@generic_func
def schedule_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
# conv2d_winograd_nnpack_weight_transform
@generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
# conv2d_gemm_weight_transform
@generic_func
def schedule_conv2d_gemm_weight_transform(attrs, outs, target):
"""Schedule conv2d_gemm_weight_transform"""
with target:
return topi.generic.schedule_conv2d_gemm_weight_transform(outs)
# deformable_conv2d
def wrap_compute_deformable_conv2d(topi_compute):
"""wrap deformable_conv2d topi compute"""
def _compute_deformable_conv2d(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(
inputs[0],
inputs[1],
inputs[2],
strides,
padding,
dilation,
deformable_groups,
groups,
out_dtype,
)
return [out]
return _compute_deformable_conv2d
@override_native_generic_func("deformable_conv2d_strategy")
def deformable_conv2d_strategy(attrs, inputs, out_type, target):
"""deformable_conv2d generic strategy"""
layout = attrs.data_layout
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw),
name="deformable_conv2d_nchw.generic",
)
elif layout == "NHWC":
# This implementation should never be picked by autotvm
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
naive_schedule,
name="deformable_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Layout %s is not supported in deformable conv2d" % layout)
return strategy
# conv2d_transpose
def wrap_compute_conv2d_transpose(topi_compute):
"""wrap conv2d_transpose topi compute"""
def compute_conv2d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv2d_transpose
@override_native_generic_func("conv2d_transpose_strategy")
def conv2d_transpose_strategy(attrs, inputs, out_type, target):
"""conv2d_transpose generic strategy"""
logger.warning("conv2d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic",
)
return strategy
# conv3d_transpose
def wrap_compute_conv3d_transpose(topi_compute):
"""wrap conv3d_transpose topi compute"""
def compute_conv3d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv3d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
output_padding = get_const_tuple(attrs.output_padding)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv3d_transpose
@override_native_generic_func("conv3d_transpose_strategy")
def conv3d_transpose_strategy(attrs, inputs, out_type, target):
"""conv3d_transpose generic strategy"""
logger.warning("conv3d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCDHW", "only support ncdhw for now"
assert dilation == (1, 1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.nn.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.generic",
)
return strategy
# conv3d
def wrap_compute_conv3d(topi_compute, need_layout=False):
"""wrap conv3d topi compute"""
def _compute_conv3d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
(dilation_d, dilation_h, dilation_w) = dilation
if dilation_d < 1 or dilation_h < 1 or dilation_w < 1:
raise ValueError("Dilation should be positive value")
if groups != 1:
raise ValueError("Not support arbitrary group number for conv3d")
if need_layout:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation, layout, out_dtype)
else:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)
return [out]
return _compute_conv3d
@override_native_generic_func("conv3d_strategy")
def conv3d_strategy(attrs, inputs, out_type, target):
"""conv3d generic strategy"""
logger.warning("conv3d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.generic",
)
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ndhwc),
wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.generic",
)
else:
raise ValueError("Not support this layout {} yet".format(layout))
return strategy
# conv3d_winograd_without_weight_transform
@override_native_generic_func("conv3d_winograd_without_weight_transform_strategy")
def conv3d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
"""conv3d_winograd_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv3d_winograd_without_weight_transform")
# conv3d_winograd_weight_transform
@generic_func
def schedule_conv3d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv3d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv3d_winograd_weight_transform(outs)
# conv1d
def wrap_compute_conv1d(topi_compute):
"""wrap conv1d topi compute"""
def _compute_conv1d(attrs, inputs, out_type):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)]
return _compute_conv1d
@override_native_generic_func("conv1d_strategy")
def conv1d_strategy(attrs, inputs, out_type, target):
"""conv1d generic strategy"""
logger.warning("conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_ncw),
name="conv1d_ncw.generic",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_conv1d_nwc),
name="conv1d_nwc.generic",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
# conv1d_transpose
def wrap_compute_conv1d_transpose(topi_compute):
"""wrap conv1d_transpose topi compute"""
    def _compute_conv1d_transpose(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
    return _compute_conv1d_transpose
@override_native_generic_func("conv1d_transpose_strategy")
def conv1d_transpose_strategy(attrs, inputs, out_type, target):
"""conv1d_transpose generic strategy"""
logger.warning("conv1d_transpose is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
strategy.add_implementation(
wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.generic",
)
return strategy
# dilation2d
def wrap_compute_dilation2d(topi_compute, need_data_layout=False):
"""Wrap dilation2d topi compute"""
def _compute_dilation2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilations = get_const_tuple(attrs.dilations)
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilations]
if need_data_layout:
args.append(data_layout)
args.append(out_dtype)
return [topi_compute(*args)]
return _compute_dilation2d
@override_native_generic_func("dilation2d_strategy")
def dilation2d_strategy(attrs, inputs, out_type, target):
"""dilation2d_strategy generic strategy"""
logger.warning("dilation2d_strategy is not optimized for this platform.")
strategy = _op.OpStrategy()
dilations = get_const_tuple(attrs.dilations)
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
assert layout in ["NCHW", "NHWC"]
(dilation_h, dilation_w) = dilations
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if layout == "NCHW":
assert kernel_layout == "IHW"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nchw),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
name="dilation2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWI"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
name="dilation2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported dilation2d layout {}".format(layout))
return strategy
# dense
def wrap_compute_dense(topi_compute):
"""wrap dense topi compute"""
def _compute_dense(attrs, inputs, out_type):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
return [topi_compute(inputs[0], inputs[1], None, out_dtype)]
return _compute_dense
@override_native_generic_func("dense_strategy")
def dense_strategy(attrs, inputs, out_type, target):
"""dense generic strategy"""
logger.warning("dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic",
)
return strategy
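# Illustrative sketch (not part of the original file): a target-specific backend would
# typically override the generic dense strategy above by registering its own version and
# adding an implementation with a higher plevel so it is preferred over the generic entry.
# "mytarget" and "dense.mytarget" below are placeholder names, not real TVM targets.
#
# @dense_strategy.register(["mytarget"])
# def dense_strategy_mytarget(attrs, inputs, out_type, target):
#     strategy = _op.OpStrategy()
#     strategy.add_implementation(
#         wrap_compute_dense(topi.nn.dense),  # reuse the generic compute here
#         wrap_topi_schedule(topi.generic.schedule_dense),
#         name="dense.mytarget",
#         plevel=15,  # higher priority than the default plevel of 10
#     )
#     return strategy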
# batch_matmul
def wrap_compute_batch_matmul(topi_compute):
"""wrap batch_matmul topi compute"""
def _compute_batch_matmul(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], out_type.shape)]
return _compute_batch_matmul
@override_native_generic_func("batch_matmul_strategy")
def batch_matmul_strategy(attrs, inputs, out_type, target):
"""batch_matmul generic strategy"""
logger.warning("batch_matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.nn.batch_matmul),
wrap_topi_schedule(topi.generic.schedule_batch_matmul),
name="batch_matmul.generic",
)
return strategy
# sparse dense
def wrap_compute_sparse_dense(topi_compute):
"""wrap sparse dense topi compute"""
def _compute_sparse_dense(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])]
return _compute_sparse_dense
@override_native_generic_func("sparse_dense_strategy")
def sparse_dense_strategy(attrs, inputs, out_type, target):
"""sparse dense generic strategy"""
logger.warning("sparse dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.nn.sparse_dense),
wrap_topi_schedule(topi.generic.schedule_sparse_dense),
name="sparse_dense.generic",
)
return strategy
@override_native_generic_func("sparse_dense_padded_strategy")
def sparse_dense_padded_strategy(attrs, inputs, out_type, target):
"""sparse dense padded generic strategy"""
raise NotImplementedError("sparse_dense_padded is only implemented for cuda")
# sparse_transpose
@generic_func
def schedule_sparse_transpose(attrs, outs, target):
"""schedule sparse_transpose"""
with target:
return topi.generic.schedule_sparse_transpose(outs)
# argsort
def wrap_compute_argsort(topi_compute):
"""Wrap argsort topi compute"""
def _compute_argsort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)]
return _compute_argsort
@override_native_generic_func("argsort_strategy")
def argsort_strategy(attrs, inputs, out_type, target):
"""argsort generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.argsort),
wrap_topi_schedule(topi.generic.schedule_argsort),
name="argsort.generic",
)
return strategy
# topk
def wrap_compute_topk(topi_compute):
"""Wrap topk compute"""
def _compute_topk(attrs, inputs, out_type):
if attrs.k is not None:
k = attrs.k
else:
k = inputs[1]
axis = get_const_int(attrs.axis)
ret_type = attrs.ret_type
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
out = topi_compute(inputs[0], k, axis, ret_type, is_ascend, dtype)
out = out if isinstance(out, list) else [out]
return out
return _compute_topk
@override_native_generic_func("topk_strategy")
def topk_strategy(attrs, inputs, out_type, target):
"""topk generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.topk),
wrap_topi_schedule(topi.generic.schedule_topk),
name="topk.generic",
)
return strategy
# multibox_prior
def wrap_compute_multibox_prior(topi_compute):
"""Wrap multibox_prior compute"""
def _compute_multibox_prior(attrs, inputs, _):
"""Compute definition of multibox_prior"""
sizes = get_float_tuple(attrs.sizes)
ratios = get_float_tuple(attrs.ratios)
steps = get_float_tuple(attrs.steps)
offsets = get_float_tuple(attrs.offsets)
clip = bool(get_const_int(attrs.clip))
return [topi_compute(inputs[0], sizes, ratios, steps, offsets, clip)]
return _compute_multibox_prior
@override_native_generic_func("multibox_prior_strategy")
def multibox_prior_strategy(attrs, inputs, out_type, target):
"""multibox_prior generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_prior(topi.vision.ssd.multibox_prior),
wrap_topi_schedule(topi.generic.schedule_multibox_prior),
name="multibox_prior.generic",
)
return strategy
# multibox_transform_loc
def wrap_compute_multibox_transform_loc(topi_compute):
"""Wrap multibox_transform_loc compute"""
def _compute_multibox_transform_loc(attrs, inputs, _):
"""Compute definition of multibox_detection"""
clip = bool(get_const_int(attrs.clip))
threshold = get_const_float(attrs.threshold)
variances = get_float_tuple(attrs.variances)
return topi_compute(inputs[0], inputs[1], inputs[2], clip, threshold, variances)
return _compute_multibox_transform_loc
@override_native_generic_func("multibox_transform_loc_strategy")
def multibox_transform_loc_strategy(attrs, inputs, out_type, target):
"""schedule multibox_transform_loc"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.vision.ssd.multibox_transform_loc),
wrap_topi_schedule(topi.generic.schedule_multibox_transform_loc),
name="multibox_transform_loc.generic",
)
return strategy
# get_valid_counts
def wrap_compute_get_valid_counts(topi_compute):
"""wrap get_valid_counts topi compute"""
def _compute_get_valid_counts(attrs, inputs, out_type):
score_threshold = inputs[1]
id_index = get_const_int(attrs.id_index)
score_index = get_const_int(attrs.score_index)
if attrs.score_threshold is not None:
score_threshold = get_const_float(attrs.score_threshold)
return topi_compute(inputs[0], score_threshold, id_index, score_index)
return _compute_get_valid_counts
@override_native_generic_func("get_valid_counts_strategy")
def get_valid_counts_strategy(attrs, inputs, out_type, target):
"""get_valid_counts generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_get_valid_counts(topi.vision.get_valid_counts),
wrap_topi_schedule(topi.generic.schedule_get_valid_counts),
name="get_valid_counts.generic",
)
return strategy
# non-maximum suppression
def wrap_compute_nms(topi_compute):
"""wrap nms topi compute"""
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[3]
iou_threshold = inputs[4]
if attrs.max_output_size is not None:
max_output_size = attrs.max_output_size
if attrs.iou_threshold is not None:
iou_threshold = get_const_float(attrs.iou_threshold)
return_indices = bool(get_const_int(attrs.return_indices))
force_suppress = bool(get_const_int(attrs.force_suppress))
top_k = get_const_int(attrs.top_k)
coord_start = get_const_int(attrs.coord_start)
score_index = get_const_int(attrs.score_index)
id_index = get_const_int(attrs.id_index)
invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom))
if return_indices:
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
]
return _compute_nms
@override_native_generic_func("non_max_suppression_strategy")
def nms_strategy(attrs, inputs, out_type, target):
"""nms generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_nms(topi.vision.non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="nms.generic",
)
return strategy
# roi_align
def wrap_compute_roi_align(topi_compute):
"""wrap roi_align topi compute"""
def _compute_roi_align(attrs, inputs, out_type):
assert attrs.layout == "NCHW"
pooled_size = get_const_tuple(attrs.pooled_size)
return [
topi_compute(
inputs[0],
inputs[1],
pooled_size=pooled_size,
spatial_scale=attrs.spatial_scale,
sample_ratio=attrs.sample_ratio,
)
]
return _compute_roi_align
@override_native_generic_func("roi_align_strategy")
def roi_align_strategy(attrs, inputs, out_type, target):
"""roi_align generic strategy"""
strategy = _op.OpStrategy()
layout = attrs.layout
assert layout == "NCHW", "only support nchw for now"
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic",
)
return strategy
# roi_pool
@generic_func
def schedule_roi_pool(attrs, outs, target):
"""schedule roi_pool"""
with target:
return topi.generic.schedule_roi_pool(outs)
# proposal
def wrap_compute_proposal(topi_compute):
"""wrap proposal topi compute"""
def _compute_proposal(attrs, inputs, out_type):
scales = get_float_tuple(attrs.scales)
ratios = get_float_tuple(attrs.ratios)
feature_stride = attrs.feature_stride
threshold = attrs.threshold
rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n
rpn_post_nms_top_n = attrs.rpn_post_nms_top_n
rpn_min_size = attrs.rpn_min_size
iou_loss = bool(get_const_int(attrs.iou_loss))
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
)
]
return _compute_proposal
@override_native_generic_func("proposal_strategy")
def proposal_strategy(attrs, inputs, out_type, target):
"""proposal generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_proposal(topi.vision.rcnn.proposal),
wrap_topi_schedule(topi.generic.schedule_proposal),
name="proposal.generic",
)
return strategy
# scatter
@override_native_generic_func("scatter_strategy")
def scatter_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter.generic",
)
return strategy
def wrap_compute_scatter(topi_compute):
"""Wrap scatter topi compute"""
def _compute_scatter(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], inputs[2], axis=attrs.axis)]
return _compute_scatter
@override_native_generic_func("scatter_add_strategy")
def scatter_add_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter_add),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter_add.generic",
)
return strategy
# scatter_nd
@override_native_generic_func("scatter_nd_strategy")
def scatter_nd_strategy(attrs, inputs, out_type, target):
"""scatter_nd generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.generic",
)
return strategy
def wrap_compute_scatter_nd(topi_compute):
"""Wrap scatter_nd topi compute"""
def _compute_scatter_nd(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], attrs.out_shape)]
return _compute_scatter_nd
# bitserial_conv2d
def wrap_compute_bitserial_conv2d(topi_compute):
"""wrap bitserial_conv2d topi compute"""
def compute_bitserial_conv2d(attrs, inputs, out_dtype):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0],
inputs[1],
strides,
padding,
activation_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar,
)
]
return compute_bitserial_conv2d
@override_native_generic_func("bitserial_conv2d_strategy")
def bitserial_conv2d_strategy(attrs, inputs, out_type, target):
"""bitserial_conv2d generic strategy"""
logger.warning("bitserial_conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.generic",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.generic",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
# bitserial_dense
def wrap_compute_bitserial_dense(topi_compute):
"""wrap bitserial_dense topi compute"""
def compute_bitserial_dense(attrs, inputs, out_type):
"""Compute definition of bitserial dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0], inputs[1], data_bits, weight_bits, pack_dtype, out_dtype, unipolar
)
]
return compute_bitserial_dense
@override_native_generic_func("bitserial_dense_strategy")
def bitserial_dense_strategy(attrs, inputs, out_type, target):
"""bitserial_dense generic strategy"""
logger.warning("bitserial_dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.nn.bitserial_dense),
wrap_topi_schedule(topi.generic.schedule_bitserial_dense),
name="bitserial_dense.generic",
)
return strategy
# correlation
def wrap_compute_correlation(topi_compute):
"""wrap correlation topi compute"""
def _compute_correlation(attrs, inputs, out_type):
kernel_size = attrs.kernel_size
max_displacement = attrs.max_displacement
stride1 = attrs.stride1
stride2 = attrs.stride2
padding = get_const_tuple(attrs.padding)
is_multiply = attrs.is_multiply
return [
topi_compute(
inputs[0],
inputs[1],
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
)
]
return _compute_correlation
@override_native_generic_func("correlation_strategy")
def correlation_strategy(attrs, inputs, out_type, target):
"""correlation generic strategy"""
logger.warning("correlation is not optimized for this platform.")
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.nn.correlation_nchw),
wrap_topi_schedule(topi.generic.schedule_correlation_nchw),
name="correlation.generic",
)
return strategy
# argwhere
def wrap_compute_argwhere(topi_compute):
"""wrap argwhere topi compute"""
def _compute_argwhere(attrs, inputs, out_type):
output_shape = []
for s in out_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
output_shape.append(te.var("any_dim", "int32"))
new_output_type = ir.TensorType(output_shape, "int32")
return [topi_compute(new_output_type, inputs[0])]
return _compute_argwhere
@override_native_generic_func("argwhere_strategy")
def argwhere_strategy(attrs, inputs, out_type, target):
"""argwhere generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argwhere(topi.argwhere),
wrap_topi_schedule(topi.generic.schedule_argwhere),
name="argwhere.generic",
)
return strategy
| [
[
[
886,
893
],
[
1133,
1140
]
],
[
[
902,
904
],
[
2192,
2194
],
[
2702,
2704
],
[
2800,
2802
]
],
[
[
921,
925
],
[
3329,
3333
],
[
3490,
3494
],
[
3753,
3757
],
[
3917,
3921
],
[
4109,
4113
],
[
4300,
4304
],
[
4831,
4835
],
[
4876,
4880
],
[
5109,
5113
],
[
5264,
5268
],
[
5424,
5428
],
[
7453,
7457
],
[
7510,
7514
],
[
7755,
7759
],
[
7812,
7816
],
[
8057,
8061
],
[
8114,
8118
],
[
8536,
8540
],
[
8603,
8607
],
[
8868,
8872
],
[
8935,
8939
],
[
9326,
9330
],
[
9406,
9410
],
[
9663,
9667
],
[
9743,
9747
],
[
10375,
10379
],
[
10446,
10450
],
[
10623,
10627
],
[
10689,
10693
],
[
11177,
11181
],
[
11249,
11253
],
[
12308,
12312
],
[
12593,
12597
],
[
12852,
12856
],
[
14083,
14087
],
[
14147,
14151
],
[
14426,
14430
],
[
15946,
15950
],
[
16005,
16009
],
[
17409,
17413
],
[
17469,
17473
],
[
19003,
19007
],
[
19057,
19061
],
[
19242,
19246
],
[
19296,
19300
],
[
20069,
20073
],
[
21188,
21192
],
[
21240,
21244
],
[
21419,
21423
],
[
21471,
21475
],
[
22922,
22926
],
[
22980,
22984
],
[
24533,
24537
],
[
24593,
24597
],
[
24825,
24829
],
[
24885,
24889
],
[
25755,
25759
],
[
25798,
25802
],
[
26473,
26477
],
[
26523,
26527
],
[
27239,
27243
],
[
27289,
27293
],
[
27804,
27808
],
[
28459,
28463
],
[
28501,
28505
],
[
29368,
29372
],
[
29407,
29411
],
[
30309,
30313
],
[
30369,
30373
],
[
31290,
31294
],
[
31358,
31362
],
[
32298,
32302
],
[
32356,
32360
],
[
34398,
34402
],
[
34459,
34463
],
[
35404,
35408
],
[
35465,
35469
],
[
35690,
35694
],
[
36902,
36906
],
[
36957,
36961
],
[
37258,
37262
],
[
37300,
37304
],
[
37829,
37833
],
[
37875,
37879
],
[
38230,
38234
],
[
38275,
38279
],
[
39887,
39891
],
[
39950,
39954
],
[
40162,
40166
],
[
40225,
40229
],
[
41450,
41454
],
[
41503,
41507
],
[
42733,
42737
],
[
42787,
42791
],
[
43641,
43645
],
[
43684,
43688
]
],
[
[
927,
931
],
[
5500,
5504
]
],
[
[
933,
935
],
[
1631,
1633
],
[
43222,
43224
]
],
[
[
937,
939
],
[
43276,
43278
]
],
[
[
967,
980
],
[
27997,
28010
],
[
28048,
28061
],
[
28825,
28838
],
[
28910,
28923
],
[
29904,
29917
],
[
30736,
30749
],
[
31709,
31722
],
[
31761,
31774
],
[
32915,
32928
],
[
32982,
32995
],
[
33035,
33048
],
[
33084,
33097
],
[
33139,
33152
],
[
33191,
33204
],
[
33254,
33267
],
[
36219,
36232
]
],
[
[
982,
997
],
[
30783,
30798
],
[
31870,
31885
],
[
32848,
32863
]
],
[
[
999,
1014
],
[
1984,
1999
],
[
2482,
2497
],
[
7011,
7026
],
[
15613,
15628
],
[
17071,
17086
],
[
20943,
20958
],
[
22583,
22598
],
[
24109,
24124
],
[
5853,
5868
],
[
5902,
5917
],
[
5952,
5967
],
[
13108,
13123
],
[
13157,
13172
],
[
13207,
13222
],
[
14912,
14927
],
[
14961,
14976
],
[
15133,
15148
],
[
16370,
16385
],
[
16419,
16434
],
[
16475,
16490
],
[
17762,
17777
],
[
17811,
17826
],
[
17861,
17876
],
[
20327,
20342
],
[
20376,
20391
],
[
20426,
20441
],
[
21850,
21865
],
[
21899,
21914
],
[
22071,
22086
],
[
23290,
23305
],
[
23339,
23354
],
[
23390,
23405
],
[
34749,
34764
],
[
38849,
38864
],
[
38898,
38913
],
[
41937,
41952
]
],
[
[
1016,
1031
],
[
29714,
29729
],
[
29760,
29775
],
[
29806,
29821
],
[
29853,
29868
],
[
30836,
30851
],
[
35888,
35903
],
[
35935,
35950
]
],
[
[
1055,
1067
],
[
3206,
3218
],
[
3370,
3382
],
[
3627,
3639
],
[
3801,
3813
],
[
3979,
3991
],
[
4166,
4178
],
[
4983,
4995
],
[
5154,
5166
],
[
5309,
5321
],
[
12143,
12155
],
[
12414,
12426
],
[
12695,
12707
],
[
19904,
19916
],
[
27671,
27683
],
[
35573,
35585
]
],
[
[
1069,
1097
],
[
4598,
4626
],
[
6737,
6765
],
[
9985,
10013
],
[
10825,
10853
],
[
11415,
11443
],
[
11789,
11817
],
[
13745,
13773
],
[
15326,
15354
],
[
16784,
16812
],
[
18644,
18672
],
[
19534,
19562
],
[
20696,
20724
],
[
22264,
22292
],
[
23826,
23854
],
[
25466,
25494
],
[
26149,
26177
],
[
26915,
26943
],
[
27392,
27420
],
[
28226,
28254
],
[
29147,
29175
],
[
30048,
30076
],
[
31005,
31033
],
[
32029,
32057
],
[
34165,
34193
],
[
35080,
35108
],
[
36665,
36693
],
[
37062,
37090
],
[
37625,
37653
],
[
37985,
38013
],
[
39479,
39507
],
[
41111,
41139
],
[
42332,
42360
],
[
43404,
43432
]
],
[
[
1113,
1122
],
[
3527,
3530
],
[
3572,
3575
],
[
4752,
4755
],
[
6953,
6956
],
[
10225,
10228
],
[
11099,
11102
],
[
13960,
13963
],
[
15858,
15861
],
[
17321,
17324
],
[
18860,
18863
],
[
21078,
21081
],
[
22520,
22523
],
[
24076,
24079
],
[
25678,
25681
],
[
26389,
26392
],
[
27155,
27158
],
[
28380,
28383
],
[
29292,
29295
],
[
30223,
30226
],
[
31196,
31199
],
[
32210,
32213
],
[
34323,
34326
],
[
35240,
35243
],
[
36822,
36825
],
[
37179,
37182
],
[
37750,
37753
],
[
38148,
38151
],
[
39735,
39738
],
[
41363,
41366
],
[
42650,
42653
],
[
43561,
43564
]
],
[
[
1124,
1130
],
[
6877,
6883
],
[
10143,
10149
],
[
11007,
11013
],
[
15496,
15502
],
[
16954,
16960
],
[
18784,
18790
],
[
20836,
20842
],
[
22434,
22440
],
[
23987,
23993
],
[
25603,
25609
],
[
26307,
26313
],
[
27073,
27079
],
[
39649,
39655
],
[
41278,
41284
],
[
42487,
42493
]
],
[
[
1169,
1183
],
[
14471,
14485
]
],
[
[
1669,
1687
],
[
4857,
4875
],
[
7491,
7509
],
[
7793,
7811
],
[
8095,
8113
],
[
8584,
8602
],
[
8916,
8934
],
[
9387,
9405
],
[
9724,
9742
],
[
10427,
10445
],
[
10670,
10688
],
[
11230,
11248
],
[
14128,
14146
],
[
15986,
16004
],
[
17450,
17468
],
[
19038,
19056
],
[
19277,
19295
],
[
21221,
21239
],
[
21452,
21470
],
[
22961,
22979
],
[
24574,
24592
],
[
24866,
24884
],
[
25779,
25797
],
[
26504,
26522
],
[
27270,
27288
],
[
28482,
28500
],
[
29388,
29406
],
[
30350,
30368
],
[
31339,
31357
],
[
32337,
32355
],
[
34440,
34458
],
[
35446,
35464
],
[
36938,
36956
],
[
37281,
37299
],
[
37856,
37874
],
[
38256,
38274
],
[
39931,
39949
],
[
40206,
40224
],
[
41484,
41502
],
[
42768,
42786
],
[
43665,
43683
]
],
[
[
1882,
1904
],
[
3063,
3085
]
],
[
[
2372,
2395
],
[
3120,
3143
]
],
[
[
2971,
2990
],
[
8309,
8328
]
],
[
[
3223,
3241
],
[
3553,
3571
]
],
[
[
3387,
3402
],
[
3595,
3610
]
],
[
[
3644,
3664
]
],
[
[
3818,
3831
]
],
[
[
3996,
4014
]
],
[
[
4183,
4205
]
],
[
[
4358,
4378
],
[
4810,
4830
]
],
[
[
4651,
4667
]
],
[
[
5000,
5020
]
],
[
[
5171,
5183
]
],
[
[
5326,
5342
]
],
[
[
5462,
5497
],
[
6162,
6197
]
],
[
[
5592,
5611
],
[
7433,
7452
],
[
7735,
7754
],
[
8037,
8056
],
[
8516,
8535
],
[
8848,
8867
],
[
9306,
9325
],
[
9643,
9662
],
[
10355,
10374
],
[
10603,
10622
],
[
11157,
11176
]
],
[
[
6789,
6804
]
],
[
[
10043,
10064
]
],
[
[
10893,
10924
]
],
[
[
11501,
11550
]
],
[
[
11871,
11916
]
],
[
[
12160,
12201
]
],
[
[
12431,
12479
]
],
[
[
12712,
12749
]
],
[
[
12935,
12965
],
[
14052,
14082
],
[
14395,
14425
]
],
[
[
13808,
13834
]
],
[
[
14690,
14719
],
[
15916,
15945
]
],
[
[
15388,
15413
]
],
[
[
16148,
16177
],
[
17379,
17408
]
],
[
[
16846,
16871
]
],
[
[
17604,
17623
],
[
18983,
19002
],
[
19222,
19241
]
],
[
[
18696,
18711
]
],
[
[
19620,
19669
]
],
[
[
19921,
19962
]
],
[
[
20145,
20164
],
[
21168,
21187
],
[
21399,
21418
]
],
[
[
20748,
20763
]
],
[
[
21682,
21711
],
[
22892,
22921
]
],
[
[
22326,
22351
]
],
[
[
23115,
23138
],
[
24509,
24532
],
[
24801,
24824
]
],
[
[
23882,
23901
]
],
[
[
25101,
25119
],
[
25736,
25754
]
],
[
[
25517,
25531
]
],
[
[
25905,
25930
],
[
26447,
26472
]
],
[
[
26207,
26228
]
],
[
[
26644,
26669
],
[
27213,
27238
]
],
[
[
26973,
26994
]
],
[
[
27457,
27485
]
],
[
[
27688,
27713
]
],
[
[
27865,
27885
],
[
28438,
28458
]
],
[
[
28279,
28295
]
],
[
[
28604,
28621
],
[
29350,
29367
]
],
[
[
29197,
29210
]
],
[
[
29514,
29541
],
[
30281,
30308
]
],
[
[
30108,
30131
]
],
[
[
30504,
30539
],
[
31254,
31289
]
],
[
[
31073,
31104
]
],
[
[
31503,
31532
],
[
32268,
32297
]
],
[
[
32091,
32116
]
],
[
[
32496,
32512
],
[
34381,
34397
]
],
[
[
34230,
34242
]
],
[
[
34559,
34581
],
[
35381,
35403
]
],
[
[
35135,
35153
]
],
[
[
35590,
35607
]
],
[
[
35744,
35765
],
[
36880,
36901
]
],
[
[
36719,
36736
]
],
[
[
37115,
37131
]
],
[
[
37396,
37416
],
[
37237,
37257
],
[
37808,
37828
]
],
[
[
37682,
37702
]
],
[
[
38041,
38060
]
],
[
[
38373,
38396
],
[
38206,
38229
]
],
[
[
38625,
38654
],
[
39857,
39886
],
[
40132,
40161
]
],
[
[
39541,
39566
]
],
[
[
40458,
40486
],
[
41421,
41449
]
],
[
[
41172,
41196
]
],
[
[
41629,
41653
],
[
42708,
42732
]
],
[
[
42389,
42409
]
],
[
[
42907,
42928
],
[
43619,
43640
]
],
[
[
43458,
43475
]
]
] |
from itertools import product
import math
from collections import OrderedDict
from pathlib import Path
import logging
import pandas as pd
import numpy as np
import geopandas as gpd
import shapely.geometry as sg
import googlemaps
# Configure logging
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s \n%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
WGS84 = {'init': 'epsg:4326'}
# Maximum number of elements in a Google Maps Distance Matrix API query
MAX_ELEMENTS = 100
def flip_coords(xy_list):
"""
Given a list of coordinate pairs, swap the first and second
coordinates and return the resulting list.
"""
return [(y, x) for (x, y) in xy_list]
def make_ids(n, prefix='row_'):
"""
Return a list of ``n`` (integer) unique strings of the form
``prefix``<number>.
"""
k = int(math.log10(n)) + 1 # Number of digits for padding
return [prefix + '{num:0{pad}d}'.format(num=i, pad=k) for i in range(n)]
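# Example (sketch): make_ids pads the numeric suffix so the IDs sort lexicographically, e.g.
# make_ids(3) -> ['row_0', 'row_1', 'row_2']
# make_ids(12, prefix='stop_') -> ['stop_00', 'stop_01', ..., 'stop_11']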
def to_df(distance_matrix_response, origin_ids=None, destination_ids=None):
"""
Given a (decoded) JSON response to a Google Maps
Distance Matrix API call, convert it into a DataFrame with the
following columns.
- ``'origin_address'``
- ``'origin_id'``: ID of origin; defaults to an element of
:func:`make_ids`
- ``'destination_address'``
    - ``'destination_id'``: ID of destination; defaults to an element of
:func:`make_ids`
- ``'duration'``: time from origin to destination; includes
time in traffic if that's available in the response
- ``'distance'``: distance from origin to destination
The origin and destination addresses in the response can optionally
be assigned IDs by setting ``origin_ids`` (list of strings) and
``destination_ids`` (list of strings).
"""
# Initialize
r = distance_matrix_response
columns = ['origin_address', 'destination_address', 'origin_id',
'destination_id', 'duration', 'distance']
f = pd.DataFrame([], columns=columns)
# Append addresses
if not r['rows']:
return f
f['origin_address'], f['destination_address'] = zip(
*product(r['origin_addresses'], r['destination_addresses']))
# Append IDs
if origin_ids is None:
origin_ids = make_ids(len(r['origin_addresses']))
if destination_ids is None:
destination_ids = make_ids(len(r['destination_addresses']))
f['origin_id'], f['destination_id'] = zip(
*product(origin_ids, destination_ids))
# Append durations and distances
durs = []
dists = []
for row in r['rows']:
for e in row['elements']:
if e['status'] == 'OK':
if 'duration_in_traffic' in e:
dur_key = 'duration_in_traffic'
else:
dur_key = 'duration'
durs.append(e[dur_key]['value'])
dists.append(e['distance']['value'])
else:
durs.append(np.nan)
dists.append(np.nan)
f['duration'] = durs
f['distance'] = dists
return f
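# Example (sketch): a hand-built 1x1 response in the shape returned by
# googlemaps.Client.distance_matrix; the addresses and numbers are made up.
#
# response = {
#     'origin_addresses': ['A St'],
#     'destination_addresses': ['B St'],
#     'rows': [{'elements': [
#         {'status': 'OK',
#          'duration': {'value': 300, 'text': '5 mins'},
#          'distance': {'value': 1000, 'text': '1 km'}},
#     ]}],
# }
# to_df(response)
# # -> one-row DataFrame with origin_id 'row_0', destination_id 'row_0',
# #    duration 300 (seconds) and distance 1000 (meters)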
def point_df_to_gdf(f, x_col='lon', y_col='lat', from_crs=WGS84):
"""
Given a DataFrame of points with x coordinates
in the column ``x_col`` and y coordinates in the column ``y_col``,
with respect to the GeoPandas coordinate reference system
``from_crs`` (dictionary), convert the DataFrame into a GeoDataFrame
with that coordinate reference system and with a ``'geometry'``
column that corresponds to the points.
Delete the original x and y columns, and return the result.
"""
f = f.copy()
f['geometry'] = f[[x_col, y_col]].apply(lambda p: sg.Point(p), axis=1)
f = f.drop([x_col, y_col], axis=1)
f = gpd.GeoDataFrame(f)
f.crs = from_crs
return f
def point_gdf_to_df(f, x_col='lon', y_col='lat', to_crs=WGS84):
"""
The inverse of :func:`point_df_to_gdf`.
Given a GeoDataFrame of points, convert to the coordinate reference
system ``to_crs`` (dictionary), then split its ``'geometry'`` column
into x coordinates in the column ``x_col`` and y coordinates in the
columns ``y_col``, deleting the ``'geometry'`` column afterwards.
Coerce the result into a DataFrame and return it.
"""
f = f.copy()
if f.crs is None:
raise ValueError('GeoDataFrame needs a crs attribute')
if f.crs != to_crs:
f = f.to_crs(to_crs)
f[x_col], f[y_col] = zip(*f['geometry'].map(lambda p: p.coords[0]))
del f['geometry']
return pd.DataFrame(f)
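# Example (sketch): round-trip a small DataFrame of WGS84 points.
#
# f = pd.DataFrame({'lon': [174.76, 174.78], 'lat': [-36.85, -36.86]})
# g = point_df_to_gdf(f)   # GeoDataFrame with a 'geometry' column of shapely Points
# f2 = point_gdf_to_df(g)  # back to a plain DataFrame with 'lon'/'lat' columns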
def build_distance_matrix_df(client, origins_gdf, destinations_gdf,
origin_id_col=None, destination_id_col=None,
max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):
"""
Compute the duration-distance matrix between the given origins
and destinations, assuming that the number of origins multiplied
by the number of destinations is at most ``max_elements``.
To do this, call the Google Maps Distance Matrix API once.
INPUT:
- ``client``: google-maps-services-python Client instance
    - ``origins_gdf``: GeoDataFrame of points; the origins
- ``destinations_gdf``: GeoDataFrame of points; the destinations
- ``origin_id_col``: string; name of ID column in ``origins_gdf``
- ``destination_id_col``: string; name of ID column in
``destinations_gdf``
- ``max_elements``: integer; max number of elements allowable in
one Google Maps Distance Matrix API call
- ``distance_matrix_kwargs``: dictionary; keyword arguments for
Google Maps Distance Matrix API
OUTPUT:
A DataFrame of the form output by :func:`to_df` where the origins
come from ``origins_gdf`` and the destinations come from
``destinations_gdf``.
Return an empty DataFrame with the expected column names if an
    HTTPError or Timeout exception occurs.
"""
# Initialize origin and destinations GeoDataFrames
o_gdf = origins_gdf.copy()
d_gdf = destinations_gdf.copy()
n = o_gdf.shape[0]*d_gdf.shape[0]
if n > max_elements:
raise ValueError('Number of origins times number of destinations '
'is {}, which exceeds threshold of {} elements'.format(
n, max_elements))
# Prepare origin data
if o_gdf.crs != WGS84:
o_gdf = o_gdf.to_crs(WGS84)
if origin_id_col is None:
origin_id_col = 'temp_id'
o_gdf[origin_id_col] = make_ids(o_gdf.shape[0])
o_locs = [geo.coords[0] for geo in o_gdf['geometry']]
o_ids = o_gdf[origin_id_col].values
# Prepare destination data
if d_gdf.crs != WGS84:
d_gdf = d_gdf.to_crs(WGS84)
if destination_id_col is None:
destination_id_col = 'temp_id'
d_gdf[destination_id_col] = make_ids(d_gdf.shape[0])
d_locs = [geo.coords[0] for geo in d_gdf['geometry']]
d_ids = d_gdf[destination_id_col].values
# Get matrix info
try:
r = client.distance_matrix(flip_coords(o_locs),
flip_coords(d_locs), **distance_matrix_kwargs)
f = to_df(r, o_ids, d_ids)
except (googlemaps.exceptions.HTTPError, googlemaps.exceptions.Timeout):
# Empty DataFrame
f = pd.DataFrame(columns=[
'origin_address',
'origin_id',
'destination_address',
'destination_id',
'duration',
'distance',
])
return f
def run_distance_matrix_job(client, origins_gdf, destinations_gdf, out_dir,
origin_id_col=None, destination_id_col=None,
max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):
"""
Compute the duration-distance matrix between the given origins
and destinations.
To do this, call the Google Maps Distance Matrix API repeatedly,
ensuring that each call uses no more than ``max_elements`` elements.
INPUT:
- ``client``: google-maps-services-python Client instance
- ``origins_gdf``: GeoDataFrame of points; the origins
- ``destinations_gdf``: GeoDataFrame of points; the destinations
- ``out_dir``: string or Path object of a directory at which
to store the output files; create the directory if it does not
exist
- ``origin_id_col``: string; name of ID column in ``origins_gdf``
- ``destination_id_col``: string; name of ID column in
``destinations_gdf``
- ``max_elements``: integer; max number of elements allowable in
one Google Maps Distance Matrix API call
- ``distance_matrix_kwargs``: dictionary; keyword arguments for
Google Maps Distance Matrix API
OUTPUT:
A collection of CSV files located at ``out_dir`` of the form output
    by :func:`to_df`, where the origins come from ``origins_gdf`` and
the destinations come from ``destinations_gdf``.
    Each file will contain one origin point and at most
``max_elements`` destination points, for a total of at most
``max_elements`` rows.
An empty DataFrame with the expected column names will be saved to
    file if an HTTPError or Timeout exception occurs.
This can happen if, for example, the daily query limit is exceeded.
"""
o_gdf = origins_gdf.copy()
d_gdf = destinations_gdf.copy()
n_o = o_gdf.shape[0]
n_d = d_gdf.shape[0]
# Create IDs if necessary
if origin_id_col is None:
origin_id_col = 'ersatz_origin_id'
o_gdf[origin_id_col] = make_ids(n_o, 'orig_row_')
if destination_id_col is None:
destination_id_col = 'ersatz_destination_id'
d_gdf[destination_id_col] = make_ids(n_d, 'dest_row_')
# Get mode for logging
mode = distance_matrix_kwargs.get('mode', 'driving')
# Make output directory if it does not exist
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir(parents=True)
# Iterate through origins.
    # For each origin, segment all destinations into chunks of size
# at most ``max_elements``.
# For each destination chunk, build a one-to-many matrix from the
# origin to all the destinations in the chunk and save it to file.
for ix, orig_id in o_gdf[[origin_id_col]].itertuples():
logger.info('Working on origin {} of {} (id {})'.format(
ix + 1, n_o, orig_id))
# Chunk destinations and build one-to-many matrices from origin
# to destination chunks.
        # A failed attempt (e.g. if the API usage limit is exceeded)
        # will produce an empty matrix
for j in range(math.ceil(n_d/max_elements)):
n1 = max_elements*j
n2 = min(max_elements*(j + 1), n_d)
dest_id1, dest_id2 = (
d_gdf[destination_id_col].iat[n1],
d_gdf[destination_id_col].iat[n2 - 1]
)
path = Path(out_dir)/'{}_from_{}_to_{}--{}.csv'.format(
mode, orig_id, dest_id1, dest_id2)
f = build_distance_matrix_df(client, o_gdf.loc[ix:ix],
d_gdf.iloc[n1:n2],
origin_id_col=origin_id_col,
destination_id_col=destination_id_col,
**distance_matrix_kwargs)
f.to_csv(path, index=False)
if f.empty:
logger.info('* Failed to get data for ' + path.stem)
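# Example (sketch): a typical driving-time job. ``API_KEY``, the GeoDataFrames, the ID
# column name and the output directory are placeholders supplied by the caller.
#
# client = googlemaps.Client(key=API_KEY)
# run_distance_matrix_job(client, stops_gdf, stops_gdf, 'output/matrices',
#     origin_id_col='stop_id', destination_id_col='stop_id', mode='driving')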
def compute_cost(n, cost=0.5/1000, num_freebies=0,
daily_limit=100000, chunk_size=MAX_ELEMENTS):
"""
Estimate the cost of a sequence of Google Maps Distance Matrix
queries comprising a total of n elements at ``cost`` USD per
element, where the first ``num_freebies`` (integer) elements are
free.
Return a Series that includes the cost and some other metadata.
"""
d = OrderedDict()
d['#elements'] = n
d['exceeds {!s}-element daily limit?'.format(daily_limit)] = (
n > daily_limit)
d['estimated cost for job in USD'] = max(0, n - num_freebies)*cost
d['estimated duration for job in minutes'] = n/chunk_size/60
return pd.Series(d)
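# Example (sketch): with the default rates, a 200,000-element job costs
# max(0, 200000 - 0) * 0.0005 = 100.0 USD and exceeds the 100,000-element daily limit;
# the duration estimate 200000 / 100 / 60 ≈ 33.3 minutes assumes roughly one
# ``chunk_size``-element call per second.
#
# compute_cost(200000)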
| [
[
[
22,
29
],
[
2269,
2276
],
[
2589,
2596
]
],
[
[
37,
41
],
[
961,
965
],
[
10512,
10516
]
],
[
[
66,
77
],
[
11666,
11677
]
],
[
[
98,
102
],
[
9774,
9778
],
[
10795,
10799
]
],
[
[
110,
117
],
[
261,
268
],
[
291,
298
],
[
327,
334
],
[
480,
487
]
],
[
[
126,
138
],
[
2106,
2108
],
[
4645,
4647
],
[
7267,
7269
],
[
11942,
11944
]
],
[
[
146,
157
],
[
3100,
3102
],
[
3137,
3139
]
],
[
[
165,
181
],
[
3864,
3867
]
],
[
[
189,
211
],
[
3796,
3798
]
],
[
[
219,
229
],
[
7163,
7173
],
[
7196,
7206
]
],
[
[
252,
258
],
[
437,
443
],
[
464,
470
],
[
10193,
10199
],
[
11210,
11216
]
],
[
[
281,
288
],
[
405,
412
],
[
455,
462
]
],
[
[
315,
324
],
[
426,
435
]
],
[
[
495,
500
],
[
3269,
3274
],
[
3975,
3980
],
[
6375,
6380
],
[
6411,
6416
],
[
6689,
6694
],
[
6725,
6730
]
],
[
[
597,
609
],
[
4792,
4804
],
[
7622,
7634
],
[
11348,
11360
]
],
[
[
621,
632
],
[
7038,
7049
],
[
7069,
7080
]
],
[
[
817,
825
],
[
2395,
2403
],
[
2491,
2499
],
[
6513,
6521
],
[
6842,
6850
],
[
9446,
9454
],
[
9598,
9606
]
],
[
[
1094,
1099
],
[
7128,
7133
]
],
[
[
3215,
3230
]
],
[
[
3923,
3938
]
],
[
[
4666,
4690
],
[
10909,
10933
]
],
[
[
7488,
7511
]
],
[
[
11268,
11280
]
]
] |
import unittest
import gevent
from gevent import sleep
from gevent.queue import Queue
import mock
from locust import events
from locust.core import Locust, TaskSet, task
from locust.exception import LocustError
from locust.main import parse_options
from locust.rpc import Message
from locust.runners import LocalLocustRunner, MasterLocustRunner
from locust.stats import global_stats, RequestStats
from locust.test.testcases import LocustTestCase
def mocked_rpc_server():
class MockedRpcServer(object):
queue = Queue()
outbox = []
def __init__(self, host, port):
pass
@classmethod
def mocked_send(cls, message):
cls.queue.put(message.serialize())
sleep(0)
def recv(self):
results = self.queue.get()
return Message.unserialize(results)
def send(self, message):
self.outbox.append(message.serialize())
return MockedRpcServer
class TestMasterRunner(LocustTestCase):
def setUp(self):
global_stats.reset_all()
self._slave_report_event_handlers = [h for h in events.slave_report._handlers]
parser, _, _ = parse_options()
args = [
"--clients", "10",
"--hatch-rate", "10"
]
opts, _ = parser.parse_args(args)
self.options = opts
def tearDown(self):
events.slave_report._handlers = self._slave_report_event_handlers
def test_slave_connect(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "zeh_fake_client1"))
self.assertEqual(1, len(master.clients))
self.assertTrue("zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict")
server.mocked_send(Message("client_ready", None, "zeh_fake_client2"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client3"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client4"))
self.assertEqual(4, len(master.clients))
server.mocked_send(Message("quit", None, "zeh_fake_client3"))
self.assertEqual(3, len(master.clients))
def test_slave_stats_report_median(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "fake_client"))
master.stats.get("/", "GET").log(100, 23455)
master.stats.get("/", "GET").log(800, 23455)
master.stats.get("/", "GET").log(700, 23455)
data = {"user_count":1}
events.report_to_master.fire(client_id="fake_client", data=data)
master.stats.clear_all()
server.mocked_send(Message("stats", data, "fake_client"))
s = master.stats.get("/", "GET")
self.assertEqual(700, s.median_response_time)
def test_master_total_stats(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "fake_client"))
stats = RequestStats()
stats.log_request("GET", "/1", 100, 3546)
stats.log_request("GET", "/1", 800, 56743)
stats2 = RequestStats()
stats2.log_request("GET", "/2", 700, 2201)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.serialize(),
"errors":stats.serialize_errors(),
"user_count": 1,
}, "fake_client"))
server.mocked_send(Message("stats", {
"stats":stats2.serialize_stats(),
"stats_total": stats2.total.serialize(),
"errors":stats2.serialize_errors(),
"user_count": 2,
}, "fake_client"))
self.assertEqual(700, master.stats.total.median_response_time)
def test_master_current_response_times(self):
class MyTestLocust(Locust):
pass
start_time = 1
with mock.patch("time.time") as mocked_time:
mocked_time.return_value = start_time
global_stats.reset_all()
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
mocked_time.return_value += 1
server.mocked_send(Message("client_ready", None, "fake_client"))
stats = RequestStats()
stats.log_request("GET", "/1", 100, 3546)
stats.log_request("GET", "/1", 800, 56743)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.get_stripped_report(),
"errors":stats.serialize_errors(),
"user_count": 1,
}, "fake_client"))
mocked_time.return_value += 1
stats2 = RequestStats()
stats2.log_request("GET", "/2", 400, 2201)
server.mocked_send(Message("stats", {
"stats":stats2.serialize_stats(),
"stats_total": stats2.total.get_stripped_report(),
"errors":stats2.serialize_errors(),
"user_count": 2,
}, "fake_client"))
mocked_time.return_value += 4
self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5))
self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95))
                # let 10 seconds pass, do some more requests, send them to the master and make
                # sure the current response time percentiles only account for these new requests
mocked_time.return_value += 10
stats.log_request("GET", "/1", 20, 1)
stats.log_request("GET", "/1", 30, 1)
stats.log_request("GET", "/1", 3000, 1)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.get_stripped_report(),
"errors":stats.serialize_errors(),
"user_count": 2,
}, "fake_client"))
self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5))
self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95))
def test_spawn_zero_locusts(self):
class MyTaskSet(TaskSet):
@task
def my_task(self):
pass
class MyTestLocust(Locust):
task_set = MyTaskSet
min_wait = 100
max_wait = 100
runner = LocalLocustRunner([MyTestLocust], self.options)
timeout = gevent.Timeout(2.0)
timeout.start()
try:
runner.start_hatching(0, 1, wait=True)
runner.greenlet.join()
except gevent.Timeout:
self.fail("Got Timeout exception. A locust seems to have been spawned, even though 0 was specified.")
finally:
timeout.cancel()
def test_spawn_uneven_locusts(self):
"""
        Tests that we can accurately spawn a certain number of locusts, even if it does not
        divide evenly among the connected slaves
"""
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
master.start_hatching(7, 7)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(7, num_clients, "Total number of locusts that would have been spawned is not 7")
def test_spawn_fewer_locusts_than_slaves(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
master.start_hatching(2, 2)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(2, num_clients, "Total number of locusts that would have been spawned is not 2")
def test_exception_in_task(self):
class HeyAnException(Exception):
pass
class MyLocust(Locust):
class task_set(TaskSet):
@task
def will_error(self):
raise HeyAnException(":(")
runner = LocalLocustRunner([MyLocust], self.options)
l = MyLocust()
l._catch_exceptions = False
self.assertRaises(HeyAnException, l.run)
self.assertRaises(HeyAnException, l.run)
self.assertEqual(1, len(runner.exceptions))
hash_key, exception = runner.exceptions.popitem()
self.assertTrue("traceback" in exception)
self.assertTrue("HeyAnException" in exception["traceback"])
self.assertEqual(2, exception["count"])
def test_exception_is_catched(self):
""" Test that exceptions are stored, and execution continues """
class HeyAnException(Exception):
pass
class MyTaskSet(TaskSet):
def __init__(self, *a, **kw):
super(MyTaskSet, self).__init__(*a, **kw)
self._task_queue = [
{"callable":self.will_error, "args":[], "kwargs":{}},
{"callable":self.will_stop, "args":[], "kwargs":{}},
]
@task(1)
def will_error(self):
raise HeyAnException(":(")
@task(1)
def will_stop(self):
self.interrupt()
class MyLocust(Locust):
min_wait = 10
max_wait = 10
task_set = MyTaskSet
runner = LocalLocustRunner([MyLocust], self.options)
l = MyLocust()
        # suppress stderr
with mock.patch("sys.stderr") as mocked:
l.task_set._task_queue = [l.task_set.will_error, l.task_set.will_stop]
self.assertRaises(LocustError, l.run) # make sure HeyAnException isn't raised
l.task_set._task_queue = [l.task_set.will_error, l.task_set.will_stop]
self.assertRaises(LocustError, l.run) # make sure HeyAnException isn't raised
self.assertEqual(2, len(mocked.method_calls))
# make sure exception was stored
self.assertEqual(1, len(runner.exceptions))
hash_key, exception = runner.exceptions.popitem()
self.assertTrue("traceback" in exception)
self.assertTrue("HeyAnException" in exception["traceback"])
self.assertEqual(2, exception["count"])
class TestMessageSerializing(unittest.TestCase):
def test_message_serialize(self):
msg = Message("client_ready", None, "my_id")
rebuilt = Message.unserialize(msg.serialize())
self.assertEqual(msg.type, rebuilt.type)
self.assertEqual(msg.data, rebuilt.data)
self.assertEqual(msg.node_id, rebuilt.node_id)
| [
[
[
7,
15
],
[
12170,
12178
]
],
[
[
24,
30
],
[
7530,
7536
],
[
7697,
7703
]
],
[
[
50,
55
],
[
740,
745
]
],
[
[
81,
86
],
[
526,
531
]
],
[
[
95,
99
],
[
1600,
1604
],
[
2566,
2570
],
[
3431,
3435
],
[
4652,
4656
],
[
4796,
4800
],
[
8155,
8159
],
[
8924,
8928
],
[
11377,
11381
]
],
[
[
119,
125
],
[
1147,
1153
],
[
1420,
1426
],
[
3024,
3030
]
],
[
[
150,
156
],
[
1552,
1558
],
[
2518,
2524
],
[
3383,
3389
],
[
4581,
4587
],
[
7333,
7339
],
[
8107,
8113
],
[
8876,
8882
],
[
9694,
9700
],
[
11143,
11149
]
],
[
[
158,
165
],
[
7213,
7220
],
[
9730,
9737
],
[
10587,
10594
]
],
[
[
167,
171
],
[
7236,
7240
],
[
9757,
9761
],
[
10926,
10930
],
[
11037,
11041
]
],
[
[
201,
212
],
[
11526,
11537
],
[
11699,
11710
]
],
[
[
237,
250
],
[
1202,
1215
]
],
[
[
274,
281
],
[
1767,
1774
],
[
2032,
2039
],
[
2114,
2121
],
[
2196,
2203
],
[
2344,
2351
],
[
2733,
2740
],
[
3170,
3177
],
[
3598,
3605
],
[
3910,
3917
],
[
4181,
4188
],
[
5017,
5024
],
[
5254,
5261
],
[
5703,
5710
],
[
6675,
6682
],
[
8357,
8364
],
[
8624,
8631
],
[
9126,
9133
],
[
9393,
9400
],
[
12242,
12249
],
[
12299,
12306
],
[
840,
847
]
],
[
[
309,
326
],
[
7455,
7472
],
[
9873,
9890
],
[
11263,
11280
]
],
[
[
328,
346
],
[
1689,
1707
],
[
2655,
2673
],
[
3520,
3538
],
[
4889,
4907
],
[
8244,
8262
],
[
9013,
9031
]
],
[
[
372,
384
],
[
1066,
1078
],
[
4754,
4766
]
],
[
[
386,
398
],
[
3664,
3676
],
[
3809,
3821
],
[
5087,
5099
],
[
5594,
5606
]
],
[
[
433,
447
],
[
1020,
1034
]
],
[
[
454,
471
],
[
1636,
1653
],
[
2602,
2619
],
[
3467,
3484
],
[
4832,
4849
],
[
8191,
8208
],
[
8960,
8977
]
],
[
[
1003,
1019
]
],
[
[
12147,
12169
]
]
] |
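# For context, a minimal sketch of the kind of Locust class the tests above construct,
# assuming the pre-1.0 Locust API (Locust, TaskSet, task) that the test module imports;
# the class and task names here are illustrative only.
from locust import Locust, TaskSet, task

class FailingTasks(TaskSet):
    @task
    def boom(self):
        # raising inside a task is what LocalLocustRunner records in runner.exceptions
        raise RuntimeError("simulated task failure")

class FailingLocust(Locust):
    min_wait = 10
    max_wait = 10
    task_set = FailingTasks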
import os
import datetime
import hashlib
import pexpect
from config import *
from common import openssl, jsonMessage, gencrl
from OpenSSL import crypto
# Revoke a certificate using the certificate file
def revokeFromCert(cert):
# Read the certificate data
try:
x509_obj = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
# get_serial_number returns the serial in decimal; convert it to hexadecimal
serial = hex(x509_obj.get_serial_number())[2:]
except crypto.Error:
return jsonMessage(status=-1,
msg="[ERROR]: Wrong certificate (X509) format!")
# Save it to a temporary folder
path = os.path.join(
'/tmp',
hashlib.md5(str(datetime.datetime.now()).encode('utf-8')).hexdigest() +
"_revokecert.crt")
with open(path, "w") as f:
f.write(cert.decode('utf8'))
return revoking(path, serial)
# Revoke a certificate by serial, by looking up the backed-up certificate under CA/newcerts
# @serial: must be in hexadecimal format
def revokeFromSerial(serial):
path = os.path.join(CA_NEWCERTS, serial + ".pem")
if not os.path.exists(path):
msg = "[ERROR]: This may be an invalid serial number!"
return jsonMessage(-1, msg)
return revoking(path, serial)
def revoking(certfile, serial):
child = openssl('ca', '-revoke', certfile)
ret = child.expect(
['Already revoked', 'Revoking Certificate', pexpect.EOF])
if ret == 0:
msg = "[ERROR]: This certificate is revoked!"
return jsonMessage(-1, msg)
elif ret == 1:
msg = "Revoke Certificate success! Serial number is " + serial
# Regenerate the certificate revocation list (CRL) file
gencrl()
return jsonMessage(0, msg, {"Serial Number": serial})
elif ret == 2:
msg = "[ERROR]: Revoke failed, unknown error!"
return jsonMessage(-1, msg)
| [
[
[
7,
9
],
[
555,
557
],
[
902,
904
],
[
956,
958
]
],
[
[
17,
25
],
[
609,
617
]
],
[
[
33,
40
],
[
593,
600
]
],
[
[
48,
55
],
[
1269,
1276
]
],
[
[
75,
76
],
[
915,
926
]
],
[
[
96,
103
],
[
1158,
1165
]
],
[
[
105,
116
],
[
429,
440
],
[
1056,
1067
],
[
1369,
1380
],
[
1533,
1544
],
[
1669,
1680
]
],
[
[
118,
124
],
[
1509,
1515
]
],
[
[
145,
151
],
[
234,
240
],
[
258,
264
],
[
400,
406
]
],
[
[
171,
185
]
],
[
[
865,
881
]
],
[
[
1118,
1126
],
[
772,
780
],
[
1089,
1097
]
]
] |
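# A hedged sketch of the pyOpenSSL calls that revokeFromCert() above relies on;
# the certificate path is a placeholder, not part of the original code.
from OpenSSL import crypto

with open("example.crt", "rb") as f:  # placeholder path
    pem_data = f.read()
x509_obj = crypto.load_certificate(crypto.FILETYPE_PEM, pem_data)
serial = hex(x509_obj.get_serial_number())[2:]  # decimal serial -> hex string
print(serial)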
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
from board import SCL, SDA
import busio
from adafruit_neotrellis.neotrellis import NeoTrellis
# create the i2c object for the trellis
i2c_bus = busio.I2C(SCL, SDA)
# create the trellis
trellis = NeoTrellis(i2c_bus)
# some color definitions
OFF = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
# this will be called when button events are received
def blink(event):
# turn the LED on when a rising edge is detected
if event.edge == NeoTrellis.EDGE_RISING:
trellis.pixels[event.number] = CYAN
# turn the LED off when a falling edge is detected
elif event.edge == NeoTrellis.EDGE_FALLING:
trellis.pixels[event.number] = OFF
for i in range(16):
# activate rising edge events on all keys
trellis.activate_key(i, NeoTrellis.EDGE_RISING)
# activate falling edge events on all keys
trellis.activate_key(i, NeoTrellis.EDGE_FALLING)
# set all keys to trigger the blink callback
trellis.callbacks[i] = blink
# cycle the LEDs on startup
trellis.pixels[i] = PURPLE
time.sleep(0.05)
for i in range(16):
trellis.pixels[i] = OFF
time.sleep(0.05)
while True:
# call the sync function to run any triggered callbacks
trellis.sync()
# the trellis can only be read every 17 milliseconds or so
time.sleep(0.02)
| [
[
[
105,
109
],
[
1265,
1269
],
[
1339,
1343
],
[
1518,
1522
]
],
[
[
131,
134
],
[
273,
276
]
],
[
[
136,
139
],
[
278,
281
]
],
[
[
148,
153
],
[
263,
268
]
],
[
[
198,
208
],
[
318,
328
],
[
983,
993
],
[
1084,
1094
],
[
665,
675
],
[
813,
823
]
],
[
[
253,
260
],
[
329,
336
]
],
[
[
308,
315
],
[
959,
966
],
[
1060,
1067
],
[
1164,
1171
],
[
1233,
1240
],
[
1310,
1317
],
[
1435,
1442
],
[
698,
705
],
[
847,
854
]
],
[
[
367,
370
],
[
1330,
1333
],
[
878,
881
]
],
[
[
384,
387
]
],
[
[
403,
409
]
],
[
[
427,
432
]
],
[
[
448,
452
],
[
729,
733
]
],
[
[
470,
474
]
],
[
[
490,
496
],
[
1253,
1259
]
],
[
[
575,
580
],
[
1187,
1192
]
],
[
[
891,
892
],
[
980,
981
],
[
1081,
1082
],
[
1182,
1183
],
[
1248,
1249
]
],
[
[
1289,
1290
],
[
1325,
1326
]
]
] |
# @file VariableFormat_Test.py
# Unit test harness for the VariableFormat module/classes.
#
##
# Copyright (c) 2017, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import unittest
import MuPythonLibrary.Uefi.EdkII.VariableFormat as VF
class TestVariableHeader(unittest.TestCase):
def test_set_name(self):
var = VF.VariableHeader()
test_name = "MyNewName"
var.set_name(test_name)
self.assertEqual(var.Name, test_name)
def test_get_packed_name(self):
var = VF.VariableHeader()
test_name = "MyNewName"
var.set_name(test_name)
test_name_packed = bytes.fromhex('4D0079004E00650077004E0061006D0065000000')
self.assertEqual(var.get_packed_name(), test_name_packed)
if __name__ == '__main__':
unittest.main()
| [
[
[
1444,
1452
],
[
1535,
1543
],
[
2052,
2060
]
],
[
[
1460,
1507
],
[
1599,
1601
],
[
1782,
1784
]
],
[
[
1516,
1534
]
]
] |
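# A hedged aside on the expected bytes asserted above: the packed name is simply the
# UTF-16LE encoding of the name followed by a two-byte null terminator.
name = "MyNewName"
packed = name.encode("utf-16-le") + b"\x00\x00"
assert packed == bytes.fromhex("4D0079004E00650077004E0061006D0065000000")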
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaql.tests
class TestCommon(yaql.tests.TestCase):
def test_null(self):
self.assertIsNone(self.eval('null'))
def test_true(self):
res = self.eval('true')
self.assertTrue(res)
self.assertIsInstance(res, bool)
def test_false(self):
res = self.eval('false')
self.assertFalse(res)
self.assertIsInstance(res, bool)
def test_string(self):
self.assertEqual('True', self.eval('True'))
self.assertEqual('some string', self.eval("'some string'"))
def test_null_to_null(self):
self.assertTrue(self.eval('null = null'))
self.assertFalse(self.eval('null != null'))
self.assertTrue(self.eval('null <= null'))
self.assertTrue(self.eval('null >= null'))
self.assertFalse(self.eval('null < null'))
self.assertFalse(self.eval('null > null'))
def test_ordering(self):
self.assertTrue(self.eval('null < 0'))
self.assertTrue(self.eval('null < true'))
self.assertTrue(self.eval('null < false'))
self.assertTrue(self.eval('null < a'))
self.assertTrue(self.eval('null <= 0'))
self.assertFalse(self.eval('null > 0'))
self.assertFalse(self.eval('null >= 0'))
self.assertTrue(self.eval('null != 0'))
self.assertTrue(self.eval('null != false'))
self.assertFalse(self.eval('null = false'))
self.assertFalse(self.eval('null = 0'))
self.assertFalse(self.eval('0 < null'))
self.assertFalse(self.eval('0 <= null'))
self.assertTrue(self.eval('0 >= null'))
self.assertTrue(self.eval('0 > null'))
def test_max(self):
self.assertEqual(5, self.eval('max(1, 5)'))
self.assertEqual(-1, self.eval('max(null, -1)'))
self.assertIsNone(self.eval('max(null, null)'))
def test_min(self):
self.assertEqual(1, self.eval('min(1, 5)'))
self.assertIsNone(self.eval('min(null, -1)'))
self.assertIsNone(self.eval('min(null, null)'))
def test_comparision_of_incomparable(self):
self.assertFalse(self.eval('a = 1'))
self.assertFalse(self.eval('a = false'))
self.assertFalse(self.eval('a = null'))
self.assertFalse(self.eval('[a] = [false]'))
self.assertTrue(self.eval('a != 1'))
self.assertTrue(self.eval('a != false'))
self.assertTrue(self.eval('[a] != [false]'))
self.assertTrue(self.eval('a != null'))
| [
[
[
621,
631
],
[
651,
655
]
],
[
[
640,
650
]
]
] |
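# A hedged sketch of evaluating the same expressions outside the TestCase helper,
# using yaql's public factory API (engine creation shown as in the yaql docs).
import yaql

engine = yaql.factory.YaqlFactory().create()
print(engine("max(1, 5)").evaluate())    # 5
print(engine("null = null").evaluate())  # True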
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions implementing Layer SavedModel serialization."""
from keras.mixed_precision import policy
from keras.saving.saved_model import base_serialization
from keras.saving.saved_model import constants
from keras.saving.saved_model import save_impl
from keras.saving.saved_model import serialized_attributes
from keras.utils import generic_utils
import tensorflow.compat.v2 as tf
class LayerSavedModelSaver(base_serialization.SavedModelSaver):
"""Implements Layer SavedModel serialization."""
@property
def object_identifier(self):
return constants.LAYER_IDENTIFIER
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
return self._python_properties_internal()
def _python_properties_internal(self):
"""Returns dictionary of all python properties."""
# TODO(kathywu): Add support for metrics serialization.
# TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once
# the python config serialization has caught up.
metadata = dict(
name=self.obj.name,
trainable=self.obj.trainable,
expects_training_arg=self.obj._expects_training_arg, # pylint: disable=protected-access
dtype=policy.serialize(self.obj._dtype_policy), # pylint: disable=protected-access
batch_input_shape=getattr(self.obj, '_batch_input_shape', None),
stateful=self.obj.stateful,
must_restore_from_config=self.obj._must_restore_from_config, # pylint: disable=protected-access
)
metadata.update(get_serialized(self.obj))
if self.obj.input_spec is not None:
# Layer's input_spec has already been type-checked in the property setter.
metadata['input_spec'] = tf.nest.map_structure(
lambda x: generic_utils.serialize_keras_object(x) if x else None,
self.obj.input_spec)
if (self.obj.activity_regularizer is not None and
hasattr(self.obj.activity_regularizer, 'get_config')):
metadata['activity_regularizer'] = generic_utils.serialize_keras_object(
self.obj.activity_regularizer)
if self.obj._build_input_shape is not None: # pylint: disable=protected-access
metadata['build_input_shape'] = self.obj._build_input_shape # pylint: disable=protected-access
return metadata
def objects_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).objects_to_serialize)
def functions_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).functions_to_serialize)
def _get_serialized_attributes(self, serialization_cache):
"""Generates or retrieves serialized attributes from cache."""
keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {})
if self.obj in keras_cache:
return keras_cache[self.obj]
serialized_attr = keras_cache[self.obj] = (
serialized_attributes.SerializedAttributes.new(self.obj))
if (save_impl.should_skip_serialization(self.obj) or
self.obj._must_restore_from_config): # pylint: disable=protected-access
return serialized_attr
object_dict, function_dict = self._get_serialized_attributes_internal(
serialization_cache)
serialized_attr.set_and_validate_objects(object_dict)
serialized_attr.set_and_validate_functions(function_dict)
return serialized_attr
def _get_serialized_attributes_internal(self, serialization_cache):
"""Returns dictionary of serialized attributes."""
objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)
functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
# Attribute validator requires that the default save signature is added to
# function dict, even if the value is None.
functions['_default_save_signature'] = None
return objects, functions
# TODO(kathywu): Move serialization utils (and related utils from
# generic_utils.py) to a separate file.
def get_serialized(obj):
with generic_utils.skip_failed_serialization():
# Store the config dictionary, which may be used when reviving the object.
# When loading, the program will attempt to revive the object from config,
# and if that fails, the object will be revived from the SavedModel.
return generic_utils.serialize_keras_object(obj)
class InputLayerSavedModelSaver(base_serialization.SavedModelSaver):
"""InputLayer serialization."""
@property
def object_identifier(self):
return constants.INPUT_LAYER_IDENTIFIER
@property
def python_properties(self):
return dict(
class_name=type(self.obj).__name__,
name=self.obj.name,
dtype=self.obj.dtype,
sparse=self.obj.sparse,
ragged=self.obj.ragged,
batch_input_shape=self.obj._batch_input_shape, # pylint: disable=protected-access
config=self.obj.get_config())
def objects_to_serialize(self, serialization_cache):
return {}
def functions_to_serialize(self, serialization_cache):
return {}
class RNNSavedModelSaver(LayerSavedModelSaver):
"""RNN layer serialization."""
@property
def object_identifier(self):
return constants.RNN_LAYER_IDENTIFIER
def _get_serialized_attributes_internal(self, serialization_cache):
objects, functions = (
super(RNNSavedModelSaver, self)._get_serialized_attributes_internal(
serialization_cache))
states = tf.__internal__.tracking.wrap(self.obj.states)
# SavedModel requires all the objects to be Trackable when saving.
# If the states are still a tuple after wrap_or_unwrap, it means they don't
# contain any trackable item within it, eg empty tuple or (None, None) for
# stateless ConvLSTM2D. We convert them to list so that wrap_or_unwrap can
# make it a Trackable again for saving. When loaded, ConvLSTM2D is
# able to handle the tuple/list conversion.
if isinstance(states, tuple):
states = tf.__internal__.tracking.wrap(list(states))
objects['states'] = states
return objects, functions
class VocabularySavedModelSaver(LayerSavedModelSaver):
"""Handles vocabulary layer serialization.
This class is needed for StringLookup, IntegerLookup, and TextVectorization,
which all have a vocabulary as part of the config. Currently, we keep this
vocab as part of the config until saving, when we need to clear it to avoid
initializing a StaticHashTable twice (once when restoring the config and once
when restoring module resources). After clearing the vocab, we
persist a property to the layer indicating it was constructed with a vocab.
"""
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
metadata = self._python_properties_internal()
# Clear the vocabulary from the config during saving.
metadata['config']['vocabulary'] = None
# Persist a property to track that a vocabulary was passed on construction.
metadata['config']['has_input_vocabulary'] = self.obj._has_input_vocabulary # pylint: disable=protected-access
return metadata
| [
[
[
797,
803
],
[
1912,
1918
]
],
[
[
841,
859
],
[
1114,
1132
],
[
5086,
5104
]
],
[
[
897,
906
],
[
1257,
1266
],
[
3468,
3477
],
[
5212,
5221
],
[
5882,
5891
]
],
[
[
944,
953
],
[
3690,
3699
],
[
4242,
4251
],
[
4318,
4327
]
],
[
[
991,
1012
],
[
3623,
3644
]
],
[
[
1037,
1050
],
[
2697,
2710
],
[
4725,
4738
],
[
5010,
5023
],
[
2452,
2465
]
],
[
[
1058,
1084
],
[
2409,
2411
],
[
6135,
6137
],
[
6656,
6658
]
],
[
[
1093,
1113
],
[
5771,
5791
],
[
6795,
6815
]
],
[
[
4697,
4711
],
[
2231,
2245
]
],
[
[
5060,
5085
]
],
[
[
5752,
5770
],
[
6025,
6043
]
],
[
[
6769,
6794
]
]
] |
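# A hedged aside: the layer metadata assembled above builds on the same config
# machinery that the public Keras serialization API exposes.
import tensorflow as tf

layer = tf.keras.layers.Dense(4, name="probe")
print(tf.keras.layers.serialize(layer))  # {'class_name': 'Dense', 'config': {...}}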
import random
x=random.random()
print("The Random number is",round(x,3))
| [
[
[
7,
13
],
[
17,
23
]
],
[
[
15,
16
],
[
69,
70
]
]
] |
# Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import re
import logging
from cmframework.apis import cmerror
class CMPluginManager(object):
def __init__(self, plugins_path):
self.pluginlist = {}
self.filterdict = {}
self.plugins_path = plugins_path
# pylint: disable=no-self-use
def load_plugin(self):
raise cmerror.CMError('Not implemented')
# pylint: disable=no-self-use
def build_input(self, indata, filtername):
search_re = re.compile(filtername)
if isinstance(indata, dict):
filter_data = {}
for key, value in indata.iteritems():
logging.debug('Matching %s against %s', key, filtername)
if search_re.match(key):
filter_data[key] = value
else:
filter_data = []
for key in indata:
logging.debug('Matching %s against %s', key, filtername)
if search_re.match(key):
filter_data.append(key)
return filter_data
| [
[
[
629,
631
],
[
1069,
1071
]
],
[
[
639,
646
],
[
1224,
1231
],
[
1457,
1464
]
],
[
[
677,
684
],
[
932,
939
]
],
[
[
693,
708
]
]
] |
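# A hedged sketch of what build_input() above computes for a dict input and a regex
# filter; the sample data is made up.
import re

indata = {"net.dns": "10.0.0.1", "net.ntp": "pool.ntp.org", "storage.size": "1T"}
search_re = re.compile("net\\.")
filter_data = {k: v for k, v in indata.items() if search_re.match(k)}
print(filter_data)  # {'net.dns': '10.0.0.1', 'net.ntp': 'pool.ntp.org'}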
from ent2id.Ent2Id import *
| [
[
[
26,
27
]
]
] |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class EvaluationRequest(AbstractModel):
"""Evaluation请求参数结构体
"""
def __init__(self):
"""
:param SessionId: Unique identifier of the image; one SessionId per image.
:type SessionId: str
:param Image: Image data; the binary image data must be base64-encoded. Provide either this or the Url parameter.
:type Image: str
:param HcmAppid: Business application ID, unrelated to the account APPID; it is a parameter that helps customers manage the service. A new HcmAppid can be created in the [console](https://console.cloud.tencent.com/hcm) under Application Management.
:type HcmAppid: str
:param Url: Image URL; provide either this or the Image parameter.
:type Url: str
:param SupportHorizontalImage: Landscape shooting switch; if enabled, images shot in landscape orientation can be submitted.
:type SupportHorizontalImage: bool
:param RejectNonArithmeticImage: Switch to reject non-arithmetic images (e.g. landscape or portrait photos); if enabled, non-arithmetic images are rejected quickly, but in extreme cases this may affect the evaluation result (for example, an arithmetic screenshot pasted onto a landscape picture may be judged non-arithmetic and rejected outright).
:type RejectNonArithmeticImage: bool
:param IsAsync: Asynchronous mode flag: 0 for synchronous mode, 1 for asynchronous mode. Defaults to synchronous mode.
:type IsAsync: int
:param EnableDispRelatedVertical: Whether to expand the vertical-form calculations in coupled expressions.
:type EnableDispRelatedVertical: bool
:param EnableDispMidresult: Whether to show the intermediate results and format control characters of vertical-form expressions.
:type EnableDispMidresult: bool
:param EnablePdfRecognize: Whether to enable PDF recognition; enabled by default.
:type EnablePdfRecognize: bool
:param PdfPageIndex: PDF page index, starting from 0; defaults to 0.
:type PdfPageIndex: int
"""
self.SessionId = None
self.Image = None
self.HcmAppid = None
self.Url = None
self.SupportHorizontalImage = None
self.RejectNonArithmeticImage = None
self.IsAsync = None
self.EnableDispRelatedVertical = None
self.EnableDispMidresult = None
self.EnablePdfRecognize = None
self.PdfPageIndex = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
self.Image = params.get("Image")
self.HcmAppid = params.get("HcmAppid")
self.Url = params.get("Url")
self.SupportHorizontalImage = params.get("SupportHorizontalImage")
self.RejectNonArithmeticImage = params.get("RejectNonArithmeticImage")
self.IsAsync = params.get("IsAsync")
self.EnableDispRelatedVertical = params.get("EnableDispRelatedVertical")
self.EnableDispMidresult = params.get("EnableDispMidresult")
self.EnablePdfRecognize = params.get("EnablePdfRecognize")
self.PdfPageIndex = params.get("PdfPageIndex")
class EvaluationResponse(AbstractModel):
"""Evaluation返回参数结构体
"""
def __init__(self):
"""
:param SessionId: Unique identifier of the image; one SessionId per image.
:type SessionId: str
:param Items: Recognized expression information.
Note: this field may return null, indicating that no valid value could be obtained.
:type Items: list of Item
:param TaskId: Task ID, used by the query API.
:type TaskId: str
:param RequestId: Unique request ID, returned with every request. Provide the RequestId of the request when troubleshooting.
:type RequestId: str
"""
self.SessionId = None
self.Items = None
self.TaskId = None
self.RequestId = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = Item()
obj._deserialize(item)
self.Items.append(obj)
self.TaskId = params.get("TaskId")
self.RequestId = params.get("RequestId")
class Item(AbstractModel):
"""识别出的算术式信息及评估结果
"""
def __init__(self):
"""
:param Item: Whether the recognized expression is correct.
:type Item: str
:param ItemString: The recognized expression.
:type ItemString: str
:param ItemCoord: Position of the recognized expression in the image.
:type ItemCoord: :class:`tencentcloud.hcm.v20181106.models.ItemCoord`
:param Answer: Recommended answer; recommended answers are not yet returned for questions with multiple relational operators, no relational operator, or unit-conversion mistakes.
:type Answer: str
:param ExpressionType: Expression question-type code, such as the four basic arithmetic operations. Types and codes: 1 four basic operations; 2 four basic operations solving for an operand given the result; 3 comparison; 4 approximate estimation; 5 division with remainder; 6 fraction arithmetic; 7 unit conversion; 8 vertical-form addition/subtraction; 9 vertical-form multiplication/division; 10 multi-step calculation; 11 solving equations.
Note: this field may return null, indicating that no valid value could be obtained.
:type ExpressionType: str
"""
self.Item = None
self.ItemString = None
self.ItemCoord = None
self.Answer = None
self.ExpressionType = None
def _deserialize(self, params):
self.Item = params.get("Item")
self.ItemString = params.get("ItemString")
if params.get("ItemCoord") is not None:
self.ItemCoord = ItemCoord()
self.ItemCoord._deserialize(params.get("ItemCoord"))
self.Answer = params.get("Answer")
self.ExpressionType = params.get("ExpressionType")
class ItemCoord(AbstractModel):
"""目标算式在图片上的坐标信息
"""
def __init__(self):
"""
:param Height: Height of the expression.
:type Height: int
:param Width: Width of the expression.
:type Width: int
:param X: X coordinate of the top-left corner of the expression image.
:type X: int
:param Y: Y coordinate of the top-left corner of the expression image.
:type Y: int
"""
self.Height = None
self.Width = None
self.X = None
self.Y = None
def _deserialize(self, params):
self.Height = params.get("Height")
self.Width = params.get("Width")
self.X = params.get("X")
self.Y = params.get("Y") | [
[
[
699,
712
],
[
739,
752
],
[
3047,
3060
],
[
4006,
4019
],
[
5190,
5203
]
],
[
[
721,
738
]
],
[
[
3028,
3046
]
],
[
[
4001,
4005
],
[
3816,
3820
]
],
[
[
5180,
5189
],
[
4993,
5002
]
]
] |
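# A hedged usage sketch for the generated models above; the field values are made up
# and the classes are assumed to be importable from this module.
resp = EvaluationResponse()
resp._deserialize({
    "SessionId": "session-001",
    "Items": [{"Item": "YES", "ItemString": "1+1=2", "Answer": "2"}],
    "TaskId": "task-001",
    "RequestId": "req-001",
})
print(resp.Items[0].ItemString)  # 1+1=2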
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import logging
import unittest.mock as mock
from unittest import TestCase
from esrally import exceptions
from esrally.utils import git
class GitTests(TestCase):
def test_is_git_working_copy(self):
test_dir = os.path.dirname(os.path.dirname(__file__))
# this test is assuming that nobody stripped the git repo info in their Rally working copy
self.assertFalse(git.is_working_copy(test_dir))
self.assertTrue(git.is_working_copy(os.path.dirname(test_dir)))
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_git_version_too_old(self, run_subprocess_with_logging, run_subprocess):
# any non-zero return value will do
run_subprocess_with_logging.return_value = 64
run_subprocess.return_value = "1.0.0"
with self.assertRaises(exceptions.SystemSetupError) as ctx:
git.head_revision("/src")
self.assertEqual("Your git version is [1.0.0] but Rally requires at least git 1.9. Please update git.", ctx.exception.args[0])
run_subprocess_with_logging.assert_called_with("git -C /src --version", level=logging.DEBUG)
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_clone_successful(self, run_subprocess_with_logging, ensure_dir):
run_subprocess_with_logging.return_value = 0
src = "/src"
remote = "http://github.com/some/project"
git.clone(src, remote)
ensure_dir.assert_called_with(src)
run_subprocess_with_logging.assert_called_with("git clone http://github.com/some/project /src")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_clone_with_error(self, run_subprocess_with_logging, ensure_dir):
run_subprocess_with_logging.return_value = 128
src = "/src"
remote = "http://github.com/some/project"
with self.assertRaises(exceptions.SupplyError) as ctx:
git.clone(src, remote)
self.assertEqual("Could not clone from [http://github.com/some/project] to [/src]", ctx.exception.args[0])
ensure_dir.assert_called_with(src)
run_subprocess_with_logging.assert_called_with("git clone http://github.com/some/project /src")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_fetch_successful(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.fetch("/src", remote="my-origin")
run_subprocess.assert_called_with("git -C /src fetch --prune --quiet my-origin")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_fetch_with_error(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = True
with self.assertRaises(exceptions.SupplyError) as ctx:
git.fetch("/src", remote="my-origin")
self.assertEqual("Could not fetch source tree from [my-origin]", ctx.exception.args[0])
run_subprocess.assert_called_with("git -C /src fetch --prune --quiet my-origin")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_checkout_successful(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.checkout("/src", "feature-branch")
run_subprocess.assert_called_with("git -C /src checkout --quiet feature-branch")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_checkout_with_error(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = True
with self.assertRaises(exceptions.SupplyError) as ctx:
git.checkout("/src", "feature-branch")
self.assertEqual("Could not checkout branch [feature-branch]. Do you have uncommitted changes?", ctx.exception.args[0])
run_subprocess.assert_called_with("git -C /src checkout --quiet feature-branch")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_rebase(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.rebase("/src", remote="my-origin", branch="feature-branch")
calls = [
mock.call("git -C /src checkout --quiet feature-branch"),
mock.call("git -C /src rebase --quiet my-origin/feature-branch")
]
run_subprocess.assert_has_calls(calls)
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull("/src", remote="my-origin", branch="feature-branch")
calls = [
mock.call("git -C /src fetch --prune --quiet my-origin"),
mock.call("git -C /src checkout --quiet feature-branch"),
mock.call("git -C /src rebase --quiet my-origin/feature-branch")
]
run_subprocess.assert_has_calls(calls)
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull_ts(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull_ts("/src", "20160101T110000Z")
run_subprocess.assert_called_with(
"git -C /src fetch --quiet origin && git -C /src checkout "
"--quiet `git -C /src rev-list -n 1 --before=\"20160101T110000Z\" --date=iso8601 origin/master`")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull_revision(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull_revision("/src", "3694a07")
run_subprocess.assert_called_with("git -C /src fetch --quiet origin && git -C /src checkout --quiet 3694a07")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_head_revision(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = ["3694a07"]
self.assertEqual("3694a07", git.head_revision("/src"))
run_subprocess.assert_called_with("git -C /src rev-parse --short HEAD")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_list_remote_branches(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = [" origin/HEAD",
" origin/master",
" origin/5.0.0-alpha1",
" origin/5"]
self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=True))
run_subprocess.assert_called_with("git -C /src for-each-ref refs/remotes/ --format='%(refname:short)'")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_list_local_branches(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = [" HEAD",
" master",
" 5.0.0-alpha1",
" 5"]
self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=False))
run_subprocess.assert_called_with("git -C /src for-each-ref refs/heads/ --format='%(refname:short)'")
| [
[
[
781,
783
],
[
1007,
1009
],
[
1023,
1025
],
[
1249,
1251
]
],
[
[
791,
798
],
[
1972,
1979
]
],
[
[
806,
827
],
[
1283,
1287
],
[
1351,
1355
],
[
1993,
1997
],
[
2040,
2044
],
[
2492,
2496
],
[
2539,
2543
],
[
3175,
3179
],
[
3231,
3235
],
[
3615,
3619
],
[
3671,
3675
],
[
4217,
4221
],
[
4273,
4277
],
[
4661,
4665
],
[
4717,
4721
],
[
5299,
5303
],
[
5355,
5359
],
[
5888,
5892
],
[
5944,
5948
],
[
6543,
6547
],
[
6599,
6603
],
[
7120,
7124
],
[
7176,
7180
],
[
7585,
7589
],
[
7653,
7657
],
[
8048,
8052
],
[
8116,
8120
],
[
8761,
8765
],
[
8829,
8833
],
[
5690,
5694
],
[
5760,
5764
],
[
6275,
6279
],
[
6345,
6349
],
[
6415,
6419
]
],
[
[
849,
857
],
[
937,
945
]
],
[
[
879,
889
],
[
1676,
1686
],
[
2839,
2849
],
[
3944,
3954
],
[
4993,
5003
]
],
[
[
916,
919
],
[
1174,
1177
],
[
1229,
1232
],
[
1725,
1728
],
[
2315,
2318
],
[
2883,
2886
],
[
3482,
3485
],
[
3988,
3991
],
[
4527,
4530
],
[
5037,
5040
],
[
5596,
5599
],
[
6183,
6186
],
[
6841,
6844
],
[
7424,
7427
],
[
7935,
7938
],
[
8608,
8611
],
[
9292,
9295
]
],
[
[
928,
936
]
]
] |
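# A hedged aside on the decorator ordering used throughout the tests above: stacked
# mock.patch decorators are applied bottom-up, so the patch closest to the function
# is passed as the first mock argument (the patch targets here are illustrative).
import unittest.mock as mock

@mock.patch("os.path.exists")  # outer patch -> second mock argument
@mock.patch("os.path.isdir")   # inner patch -> first mock argument
def probe(mock_isdir, mock_exists):
    mock_isdir.return_value = True
    mock_exists.return_value = False
    return mock_isdir("x"), mock_exists("y")

print(probe())  # (True, False)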
import argparse
import time
import ray
ray.init(address="auto")
parser = argparse.ArgumentParser()
parser.add_argument(
"num_nodes", type=int, help="Wait for this number of nodes (includes head)"
)
parser.add_argument("max_time_s", type=int, help="Wait for this number of seconds")
parser.add_argument(
"--feedback_interval_s",
type=int,
default=10,
help="Wait for this number of seconds",
)
args = parser.parse_args()
curr_nodes = 0
start = time.time()
next_feedback = start
max_time = start + args.max_time_s
while not curr_nodes >= args.num_nodes:
now = time.time()
if now >= max_time:
raise RuntimeError(
f"Maximum wait time reached, but only "
f"{curr_nodes}/{args.num_nodes} nodes came up. Aborting."
)
if now >= next_feedback:
passed = now - start
print(
f"Waiting for more nodes to come up: "
f"{curr_nodes}/{args.num_nodes} "
f"({passed:.0f} seconds passed)"
)
next_feedback = now + args.feedback_interval_s
time.sleep(5)
curr_nodes = len(ray.nodes())
passed = time.time() - start
print(
f"Cluster is up: {curr_nodes}/{args.num_nodes} nodes online after "
f"{passed:.0f} seconds"
)
| [
[
[
7,
15
],
[
76,
84
]
],
[
[
23,
27
],
[
469,
473
],
[
589,
593
],
[
1072,
1076
],
[
1130,
1134
]
],
[
[
36,
39
],
[
41,
44
],
[
1107,
1110
]
],
[
[
67,
73
],
[
102,
108
],
[
206,
212
],
[
291,
297
],
[
425,
431
]
],
[
[
418,
422
],
[
522,
526
],
[
563,
567
],
[
734,
738
],
[
939,
943
],
[
1042,
1046
],
[
1192,
1196
]
],
[
[
446,
456
],
[
549,
559
],
[
721,
731
],
[
926,
936
],
[
1179,
1189
]
],
[
[
461,
466
],
[
497,
502
],
[
514,
519
],
[
839,
844
],
[
1144,
1149
]
],
[
[
481,
494
],
[
801,
814
]
],
[
[
503,
511
],
[
616,
624
]
],
[
[
583,
586
],
[
609,
612
],
[
794,
797
],
[
833,
836
],
[
1036,
1039
]
],
[
[
824,
830
],
[
973,
979
]
],
[
[
1020,
1033
],
[
801,
814
]
],
[
[
1090,
1100
],
[
549,
559
],
[
721,
731
],
[
926,
936
],
[
1179,
1189
]
],
[
[
1121,
1127
],
[
1236,
1242
]
]
] |
import itertools
from multiprocessing import Manager
from pyaugmecon.options import Options
class Flag(object):
def __init__(self, opts: Options):
self.opts = opts
if self.opts.shared_flag:
self.flag = Manager().dict()
else:
self.flag = {}
def set(self, flag_range, value, iter):
indices = [tuple([n for n in flag_range(o)]) for o in iter]
iter = list(itertools.product(*indices))
tmp_flag = {}
for gp in iter:
tmp_flag[gp] = value
self.flag.update(tmp_flag)
def get(self, i):
return self.flag.get(i, 0)
| [
[
[
7,
16
],
[
428,
437
]
],
[
[
45,
52
],
[
237,
244
]
],
[
[
84,
91
],
[
143,
150
]
],
[
[
100,
104
]
]
] |
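# A hedged sketch of the index expansion Flag.set() above performs: each axis is
# expanded with flag_range and the axes are combined with itertools.product.
import itertools

flag_range = range  # stand-in for the callable passed to set()
axes = [2, 3]       # stand-in for the `iter` argument
indices = [tuple(flag_range(o)) for o in axes]
grid_points = list(itertools.product(*indices))
print(grid_points)  # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]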
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import os
from setuptools import setup, find_packages
__version__ = '3.1.0'
requirements_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requirements.txt')
with open(requirements_path) as requirements_file:
requirements = requirements_file.readlines()
kafka = ['confluent-kafka==1.0.0']
cassandra = ['cassandra-driver==3.20.1']
glue = ['boto3==1.10.1']
snowflake = [
'snowflake-connector-python',
'snowflake-sqlalchemy'
]
athena = ['PyAthena[SQLAlchemy]>=1.0.0']
# Python API client for google
# License: Apache Software License
# Upstream url: https://github.com/googleapis/google-api-python-client
bigquery = [
'google-api-python-client>=1.6.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'google-auth>=1.0.0, <2.0.0dev'
]
jsonpath = ['jsonpath_rw==1.4.0']
db2 = [
'ibm_db==3.0.1',
'ibm-db-sa-py3==0.3.1-1'
]
druid = [
'pydruid'
]
all_deps = requirements + kafka + cassandra + glue + snowflake + athena + bigquery + jsonpath + db2 + druid
setup(
name='amundsen-databuilder',
version=__version__,
description='Amundsen Data builder',
url='https://www.github.com/amundsen-io/amundsendatabuilder',
maintainer='Amundsen TSC',
maintainer_email='amundsen-tsc@lists.lfai.foundation',
packages=find_packages(exclude=['tests*']),
dependency_links=[],
install_requires=requirements,
python_requires='>=3.6,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*',
extras_require={
':python_version=="2.7"': ['typing>=3.6'], # allow typehinting PY2
'all': all_deps,
'kafka': kafka, # To use with Kafka source extractor
'cassandra': cassandra,
'glue': glue,
'snowflake': snowflake,
'athena': athena,
'bigquery': bigquery,
'jsonpath': jsonpath,
'db2': db2,
'druid': druid,
},
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| [
[
[
96,
98
],
[
188,
190
],
[
201,
203
],
[
217,
219
]
],
[
[
122,
127
],
[
1096,
1101
]
],
[
[
129,
142
],
[
1371,
1384
]
],
[
[
145,
156
],
[
1148,
1159
]
],
[
[
168,
185
],
[
276,
293
]
],
[
[
298,
315
],
[
336,
353
]
],
[
[
321,
333
],
[
998,
1010
],
[
1452,
1464
]
],
[
[
367,
372
],
[
1013,
1018
],
[
1682,
1687
]
],
[
[
403,
412
],
[
1021,
1030
],
[
1748,
1757
]
],
[
[
445,
449
],
[
1033,
1037
],
[
1775,
1779
]
],
[
[
471,
480
],
[
1040,
1049
],
[
1802,
1811
]
],
[
[
549,
555
],
[
1052,
1058
],
[
1831,
1837
]
],
[
[
728,
736
],
[
1061,
1069
],
[
1859,
1867
]
],
[
[
864,
872
],
[
1072,
1080
],
[
1889,
1897
]
],
[
[
899,
902
],
[
1083,
1086
],
[
1914,
1917
]
],
[
[
960,
965
],
[
1089,
1094
],
[
1936,
1941
]
],
[
[
987,
995
],
[
1655,
1663
]
]
] |
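# A hedged aside on the comma added to the bigquery list above: without it, Python's
# implicit string concatenation silently merges two adjacent requirements into one.
merged = ['google-auth-httplib2>=0.0.1' 'google-auth>=1.0.0, <2.0.0dev']
print(len(merged))  # 1
print(merged[0])    # google-auth-httplib2>=0.0.1google-auth>=1.0.0, <2.0.0dev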
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EcommerceProductImage2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'url': 'str',
'variant_ids': 'list[str]'
}
attribute_map = {
'id': 'id',
'url': 'url',
'variant_ids': 'variant_ids'
}
def __init__(self, id=None, url=None, variant_ids=None): # noqa: E501
"""EcommerceProductImage2 - a model defined in Swagger""" # noqa: E501
self._id = None
self._url = None
self._variant_ids = None
self.discriminator = None
if id is not None:
self.id = id
if url is not None:
self.url = url
if variant_ids is not None:
self.variant_ids = variant_ids
@property
def id(self):
"""Gets the id of this EcommerceProductImage2. # noqa: E501
A unique identifier for the product image. # noqa: E501
:return: The id of this EcommerceProductImage2. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EcommerceProductImage2.
A unique identifier for the product image. # noqa: E501
:param id: The id of this EcommerceProductImage2. # noqa: E501
:type: str
"""
self._id = id
@property
def url(self):
"""Gets the url of this EcommerceProductImage2. # noqa: E501
The URL for a product image. # noqa: E501
:return: The url of this EcommerceProductImage2. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this EcommerceProductImage2.
The URL for a product image. # noqa: E501
:param url: The url of this EcommerceProductImage2. # noqa: E501
:type: str
"""
self._url = url
@property
def variant_ids(self):
"""Gets the variant_ids of this EcommerceProductImage2. # noqa: E501
The list of product variants using the image. # noqa: E501
:return: The variant_ids of this EcommerceProductImage2. # noqa: E501
:rtype: list[str]
"""
return self._variant_ids
@variant_ids.setter
def variant_ids(self, variant_ids):
"""Sets the variant_ids of this EcommerceProductImage2.
The list of product variants using the image. # noqa: E501
:param variant_ids: The variant_ids of this EcommerceProductImage2. # noqa: E501
:type: list[str]
"""
self._variant_ids = variant_ids
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EcommerceProductImage2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EcommerceProductImage2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
[
[
321,
327
],
[
4386,
4392
]
],
[
[
335,
337
]
],
[
[
360,
363
],
[
3449,
3452
]
],
[
[
372,
394
],
[
4153,
4175
],
[
4626,
4648
]
]
] |
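# A hedged usage sketch for the generated model above; the field values are made up.
img = EcommerceProductImage2(id="img-1", url="https://example.com/p.png",
                             variant_ids=["v1", "v2"])
print(img.to_dict())
# {'id': 'img-1', 'url': 'https://example.com/p.png', 'variant_ids': ['v1', 'v2']}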
from optparse import OptionParser
import json
def main():
usage = "" # TODO
parser = OptionParser(usage=usage)
#parser.add_option("-a", "--a_descrip", action="store_true", help="This is a flat")
#parser.add_option("-b", "--b_descrip", help="This is an argument")
(options, args) = parser.parse_args()
with open('transfected_sample_raw_metadata.json', 'r') as f:
sample_to_key_to_val = json.load(f)
gene_id_to_symbol = {}
gene_id_to_name = {}
with open('genes.tsv', 'r') as f:
for i,l in enumerate(f):
if i == 0:
continue
toks = l.split()
g_id = toks[0]
symbol = toks[1]
gene_id_to_symbol[g_id] = symbol
if len(toks) == 3:
name = toks[2]
gene_id_to_name[g_id] = name
if __name__ == "__main__":
main()
| [
[
[
21,
33
],
[
95,
107
]
],
[
[
41,
45
],
[
420,
424
]
],
[
[
51,
55
],
[
901,
905
]
]
] |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: contact.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='contact.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\rcontact.proto\"\xdb\x01\n\x07\x43ontact\x12\x12\n\nfirst_name\x18\x01 \x01(\t\x12\x11\n\tlast_name\x18\x02 \x01(\t\x12\x14\n\x0ctwitter_name\x18\x03 \x01(\t\x12\r\n\x05\x65mail\x18\x04 \x01(\t\x12\x13\n\x0bgithub_link\x18\x05 \x01(\t\x12\"\n\x04type\x18\x06 \x01(\x0e\x32\x14.Contact.ContactType\x12\x11\n\timageName\x18\x07 \x01(\t\"8\n\x0b\x43ontactType\x12\x0b\n\x07SPEAKER\x10\x00\x12\r\n\tATTENDANT\x10\x01\x12\r\n\tVOLUNTEER\x10\x02\"&\n\x08Speakers\x12\x1a\n\x08\x63ontacts\x18\x01 \x03(\x0b\x32\x08.Contactb\x06proto3')
)
_CONTACT_CONTACTTYPE = _descriptor.EnumDescriptor(
name='ContactType',
full_name='Contact.ContactType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SPEAKER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ATTENDANT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VOLUNTEER', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=181,
serialized_end=237,
)
_sym_db.RegisterEnumDescriptor(_CONTACT_CONTACTTYPE)
_CONTACT = _descriptor.Descriptor(
name='Contact',
full_name='Contact',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='first_name', full_name='Contact.first_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_name', full_name='Contact.last_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='twitter_name', full_name='Contact.twitter_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='email', full_name='Contact.email', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='github_link', full_name='Contact.github_link', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='Contact.type', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imageName', full_name='Contact.imageName', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CONTACT_CONTACTTYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=18,
serialized_end=237,
)
_SPEAKERS = _descriptor.Descriptor(
name='Speakers',
full_name='Speakers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='contacts', full_name='Speakers.contacts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=277,
)
_CONTACT.fields_by_name['type'].enum_type = _CONTACT_CONTACTTYPE
_CONTACT_CONTACTTYPE.containing_type = _CONTACT
_SPEAKERS.fields_by_name['contacts'].message_type = _CONTACT
DESCRIPTOR.message_types_by_name['Contact'] = _CONTACT
DESCRIPTOR.message_types_by_name['Speakers'] = _SPEAKERS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Contact = _reflection.GeneratedProtocolMessageType('Contact', (_message.Message,), dict(
DESCRIPTOR = _CONTACT,
__module__ = 'contact_pb2'
# @@protoc_insertion_point(class_scope:Contact)
))
_sym_db.RegisterMessage(Contact)
Speakers = _reflection.GeneratedProtocolMessageType('Speakers', (_message.Message,), dict(
DESCRIPTOR = _SPEAKERS,
__module__ = 'contact_pb2'
# @@protoc_insertion_point(class_scope:Speakers)
))
_sym_db.RegisterMessage(Speakers)
# @@protoc_insertion_point(module_scope)
| [
[
[
91,
94
],
[
98,
101
]
],
[
[
95,
97
],
[
625,
627
],
[
2157,
2159
],
[
2500,
2502
],
[
2849,
2851
],
[
3184,
3186
],
[
3531,
3533
],
[
4187,
4189
]
],
[
[
198,
223
],
[
524,
535
],
[
1189,
1200
],
[
1325,
1336
],
[
1441,
1452
],
[
1559,
1570
],
[
1832,
1843
],
[
1972,
1983
],
[
2317,
2328
],
[
2660,
2671
],
[
3009,
3020
],
[
3344,
3355
],
[
3691,
3702
],
[
4004,
4015
],
[
4591,
4602
],
[
4733,
4744
]
],
[
[
252,
271
],
[
5656,
5664
],
[
5890,
5898
]
],
[
[
300,
325
],
[
5603,
5614
],
[
5836,
5847
]
],
[
[
354,
389
],
[
480,
496
]
],
[
[
418,
432
]
],
[
[
470,
477
],
[
1766,
1773
],
[
5549,
5556
],
[
5791,
5798
],
[
6027,
6034
]
],
[
[
511,
521
],
[
1298,
1308
],
[
1921,
1931
],
[
4682,
4692
],
[
5437,
5447
],
[
5492,
5502
],
[
5580,
5590
]
],
[
[
1166,
1186
],
[
1797,
1817
],
[
4406,
4426
],
[
5307,
5327
],
[
5328,
5348
]
],
[
[
1821,
1829
],
[
5263,
5271
],
[
5367,
5375
],
[
5428,
5436
],
[
5483,
5491
],
[
5697,
5705
]
],
[
[
4579,
4588
],
[
5376,
5385
],
[
5539,
5548
],
[
5931,
5940
]
],
[
[
5593,
5600
],
[
5815,
5822
]
],
[
[
5825,
5833
],
[
6051,
6059
]
]
] |
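# A hedged usage sketch for the generated message classes above; it assumes the
# module is importable as contact_pb2.
import contact_pb2

c = contact_pb2.Contact(first_name="Ada", type=contact_pb2.Contact.SPEAKER)
data = c.SerializeToString()
print(contact_pb2.Contact.FromString(data).first_name)  # Ada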
import netbox_agent.dmidecode as dmidecode
from netbox_agent.config import config
from netbox_agent.config import netbox_instance as nb
from netbox_agent.inventory import Inventory
from netbox_agent.location import Datacenter, Rack, Tenant
from netbox_agent.misc import create_netbox_tags, get_device_role, get_device_type, get_device_platform
from netbox_agent.network import ServerNetwork
from netbox_agent.power import PowerSupply
from pprint import pprint
import subprocess
import logging
import socket
import sys
class ServerBase():
def __init__(self, dmi=None):
if dmi:
self.dmi = dmi
else:
self.dmi = dmidecode.parse()
self.baseboard = dmidecode.get_by_type(self.dmi, 'Baseboard')
self.bios = dmidecode.get_by_type(self.dmi, 'BIOS')
self.chassis = dmidecode.get_by_type(self.dmi, 'Chassis')
self.system = dmidecode.get_by_type(self.dmi, 'System')
self.device_platform = get_device_platform(config.device.platform)
self.network = None
self.tags = list(set([
x.strip() for x in config.device.tags.split(',') if x.strip()
])) if config.device.tags else []
self.nb_tags = list(create_netbox_tags(self.tags))
config_cf = set([
f.strip() for f in config.device.custom_fields.split(",")
if f.strip()
])
self.custom_fields = {}
self.custom_fields.update(dict([
(k.strip(), v.strip()) for k, v in
[f.split("=", 1) for f in config_cf]
]))
def get_tenant(self):
tenant = Tenant()
return tenant.get()
def get_netbox_tenant(self):
tenant = self.get_tenant()
if tenant is None:
return None
nb_tenant = nb.tenancy.tenants.get(
slug=self.get_tenant()
)
return nb_tenant
def get_datacenter(self):
dc = Datacenter()
return dc.get()
def get_netbox_datacenter(self):
dc = self.get_datacenter()
if dc is None:
logging.error("Specificing a datacenter (Site) is mandatory in Netbox")
sys.exit(1)
nb_dc = nb.dcim.sites.get(
slug=dc,
)
if nb_dc is None:
logging.error("Site (slug: {}) has not been found".format(dc))
sys.exit(1)
return nb_dc
def update_netbox_location(self, server):
dc = self.get_datacenter()
nb_rack = self.get_netbox_rack()
nb_dc = self.get_netbox_datacenter()
update = False
if dc and server.site and server.site.slug != nb_dc.slug:
logging.info('Datacenter location has changed from {} to {}, updating'.format(
server.site.slug,
nb_dc.slug,
))
update = True
server.site = nb_dc.id
if (
server.rack
and nb_rack
and server.rack.id != nb_rack.id
):
logging.info('Rack location has changed from {} to {}, updating'.format(
server.rack,
nb_rack,
))
update = True
server.rack = nb_rack
if nb_rack is None:
server.face = None
server.position = None
return update, server
def update_netbox_expansion_location(self, server, expansion):
update = False
if expansion.tenant != server.tenant:
expansion.tenant = server.tenant
update = True
if expansion.site != server.site:
expansion.site = server.site
update = True
if expansion.rack != server.rack:
expansion.rack = server.rack
update = True
return update
def get_rack(self):
rack = Rack()
return rack.get()
def get_netbox_rack(self):
rack = self.get_rack()
datacenter = self.get_netbox_datacenter()
if not rack:
return None
if rack and not datacenter:
logging.error("Can't get rack if no datacenter is configured or found")
sys.exit(1)
return nb.dcim.racks.get(
name=rack,
site_id=datacenter.id,
)
def get_product_name(self):
"""
Return the Chassis Name from dmidecode info
"""
return self.system[0]['Product Name'].strip()
def get_service_tag(self):
"""
Return the Service Tag from dmidecode info
"""
return self.system[0]['Serial Number'].strip()
def get_expansion_service_tag(self):
"""
Return the virtual Service Tag from dmidecode info host
with 'expansion'
"""
return self.system[0]['Serial Number'].strip() + " expansion"
def get_hostname(self):
if config.hostname_cmd is None:
return '{}'.format(socket.gethostname())
return subprocess.getoutput(config.hostname_cmd)
def is_blade(self):
raise NotImplementedError
def get_blade_slot(self):
raise NotImplementedError
def get_chassis(self):
raise NotImplementedError
def get_chassis_name(self):
raise NotImplementedError
def get_chassis_service_tag(self):
raise NotImplementedError
def get_bios_version(self):
raise NotImplementedError
def get_bios_version_attr(self):
raise NotImplementedError
def get_bios_release_date(self):
raise NotImplementedError
def get_power_consumption(self):
raise NotImplementedError
def get_expansion_product(self):
raise NotImplementedError
def _netbox_create_chassis(self, datacenter, tenant, rack):
device_type = get_device_type(self.get_chassis())
device_role = get_device_role(config.device.chassis_role)
serial = self.get_chassis_service_tag()
logging.info('Creating chassis blade (serial: {serial})'.format(
serial=serial))
new_chassis = nb.dcim.devices.create(
name=self.get_chassis_name(),
device_type=device_type.id,
serial=serial,
device_role=device_role.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
custom_fields=self.custom_fields,
)
return new_chassis
def _netbox_create_blade(self, chassis, datacenter, tenant, rack):
device_role = get_device_role(config.device.blade_role)
device_type = get_device_type(self.get_product_name())
serial = self.get_service_tag()
hostname = self.get_hostname()
logging.info(
'Creating blade (serial: {serial}) {hostname} on chassis {chassis_serial}'.format(
serial=serial, hostname=hostname, chassis_serial=chassis.serial
))
new_blade = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
parent_device=chassis.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
custom_fields=self.custom_fields,
)
return new_blade
def _netbox_create_blade_expansion(self, chassis, datacenter, tenant, rack):
device_role = get_device_role(config.device.blade_role)
device_type = get_device_type(self.get_expansion_product())
serial = self.get_expansion_service_tag()
hostname = self.get_hostname() + " expansion"
logging.info(
'Creating expansion (serial: {serial}) {hostname} on chassis {chassis_serial}'.format(
serial=serial, hostname=hostname, chassis_serial=chassis.serial
))
new_blade = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
parent_device=chassis.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
)
return new_blade
def _netbox_deduplicate_server(self):
serial = self.get_service_tag()
hostname = self.get_hostname()
server = nb.dcim.devices.get(name=hostname)
if server and server.serial != serial:
server.delete()
def _netbox_create_server(self, datacenter, tenant, rack):
device_role = get_device_role(config.device.server_role)
device_type = get_device_type(self.get_product_name())
if not device_type:
raise Exception('Chassis "{}" doesn\'t exist'.format(self.get_chassis()))
serial = self.get_service_tag()
hostname = self.get_hostname()
logging.info('Creating server (serial: {serial}) {hostname}'.format(
serial=serial, hostname=hostname))
new_server = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
platform=self.device_platform,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
)
return new_server
def get_netbox_server(self, expansion=False):
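        # Fetch this device from Netbox by service tag, or the expansion card's record
        # when expansion=True.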
if expansion is False:
return nb.dcim.devices.get(serial=self.get_service_tag())
else:
return nb.dcim.devices.get(serial=self.get_expansion_service_tag())
def _netbox_set_or_update_blade_slot(self, server, chassis, datacenter):
        # Before anything else, check whether the blade is already in the right chassis and slot
actual_device_bay = server.parent_device.device_bay \
if server.parent_device else None
actual_chassis = actual_device_bay.device \
if actual_device_bay else None
slot = self.get_blade_slot()
if actual_chassis and \
actual_chassis.serial == chassis.serial and \
actual_device_bay.name == slot:
return
real_device_bays = nb.dcim.device_bays.filter(
device_id=chassis.id,
name=slot,
)
if real_device_bays:
logging.info(
'Setting device ({serial}) new slot on {slot} '
'(Chassis {chassis_serial})..'.format(
serial=server.serial, slot=slot, chassis_serial=chassis.serial
))
# reset actual device bay if set
if actual_device_bay:
                # Force evaluation of the installed_device attribute to work
                # around a bug, probably caused by a lazy-loading optimization,
                # that prevents the value change from being detected
actual_device_bay.installed_device
actual_device_bay.installed_device = None
actual_device_bay.save()
            # set up the new device bay
real_device_bay = next(real_device_bays)
real_device_bay.installed_device = server
real_device_bay.save()
else:
logging.error('Could not find slot {slot} for chassis'.format(
slot=slot
))
def _netbox_set_or_update_blade_expansion_slot(self, expansion, chassis, datacenter):
        # Before anything else, check whether the expansion is already in the right chassis and slot
actual_device_bay = expansion.parent_device.device_bay if expansion.parent_device else None
actual_chassis = actual_device_bay.device if actual_device_bay else None
slot = self.get_blade_expansion_slot()
if actual_chassis and \
actual_chassis.serial == chassis.serial and \
actual_device_bay.name == slot:
return
real_device_bays = nb.dcim.device_bays.filter(
device_id=chassis.id,
name=slot,
)
if not real_device_bays:
logging.error('Could not find slot {slot} expansion for chassis'.format(
slot=slot
))
return
logging.info(
'Setting device expansion ({serial}) new slot on {slot} '
'(Chassis {chassis_serial})..'.format(
serial=expansion.serial, slot=slot, chassis_serial=chassis.serial
))
# reset actual device bay if set
if actual_device_bay:
            # Force evaluation of the installed_device attribute to work
            # around a bug, probably caused by a lazy-loading optimization,
            # that prevents the value change from being detected
actual_device_bay.installed_device
actual_device_bay.installed_device = None
actual_device_bay.save()
        # set up the new device bay
real_device_bay = next(real_device_bays)
real_device_bay.installed_device = expansion
real_device_bay.save()
def netbox_create_or_update(self, config):
"""
        Netbox method to create or update info about our server/blade.

        Handles:
        * new chassis for a blade
        * new slot for a blade
        * hostname update
        * network information
        * inventory management
        * PSU management
"""
datacenter = self.get_netbox_datacenter()
rack = self.get_netbox_rack()
tenant = self.get_netbox_tenant()
if config.purge_old_devices:
self._netbox_deduplicate_server()
if self.is_blade():
chassis = nb.dcim.devices.get(
serial=self.get_chassis_service_tag()
)
# Chassis does not exist
if not chassis:
chassis = self._netbox_create_chassis(datacenter, tenant, rack)
server = nb.dcim.devices.get(serial=self.get_service_tag())
if not server:
server = self._netbox_create_blade(chassis, datacenter, tenant, rack)
# Set slot for blade
self._netbox_set_or_update_blade_slot(server, chassis, datacenter)
else:
server = nb.dcim.devices.get(serial=self.get_service_tag())
if not server:
server = self._netbox_create_server(datacenter, tenant, rack)
logging.debug('Updating Server...')
# check network cards
if config.register or config.update_all or config.update_network:
self.network = ServerNetwork(server=self)
self.network.create_or_update_netbox_network_cards()
update_inventory = config.inventory and (config.register or
config.update_all or config.update_inventory)
# update inventory if feature is enabled
self.inventory = Inventory(server=self)
if update_inventory:
self.inventory.create_or_update()
# update psu
if config.register or config.update_all or config.update_psu:
self.power = PowerSupply(server=self)
self.power.create_or_update_power_supply()
self.power.report_power_consumption()
expansion = nb.dcim.devices.get(serial=self.get_expansion_service_tag())
if self.own_expansion_slot() and config.expansion_as_device:
logging.debug('Update Server expansion...')
if not expansion:
expansion = self._netbox_create_blade_expansion(chassis, datacenter, tenant, rack)
# set slot for blade expansion
self._netbox_set_or_update_blade_expansion_slot(expansion, chassis, datacenter)
if update_inventory:
# Updates expansion inventory
inventory = Inventory(server=self, update_expansion=True)
inventory.create_or_update()
elif self.own_expansion_slot() and expansion:
expansion.delete()
expansion = None
update = 0
# for every other specs
# check hostname
if server.name != self.get_hostname():
server.name = self.get_hostname()
update += 1
server_tags = sorted(set([x.name for x in server.tags]))
tags = sorted(set(self.tags))
if server_tags != tags:
new_tags_ids = [x.id for x in self.nb_tags]
if not config.preserve_tags:
server.tags = new_tags_ids
else:
server_tags_ids = [x.id for x in server.tags]
server.tags = sorted(set(new_tags_ids + server_tags_ids))
update += 1
if server.custom_fields != self.custom_fields:
server.custom_fields = self.custom_fields
update += 1
if config.update_all or config.update_location:
ret, server = self.update_netbox_location(server)
update += ret
if server.platform != self.device_platform:
server.platform = self.device_platform
update += 1
if update:
server.save()
if expansion:
update = 0
expansion_name = server.name + ' expansion'
if expansion.name != expansion_name:
expansion.name = expansion_name
update += 1
if self.update_netbox_expansion_location(server, expansion):
update += 1
if update:
expansion.save()
logging.debug('Finished updating Server!')
def print_debug(self):
self.network = ServerNetwork(server=self)
print('Datacenter:', self.get_datacenter())
print('Netbox Datacenter:', self.get_netbox_datacenter())
print('Rack:', self.get_rack())
print('Netbox Rack:', self.get_netbox_rack())
print('Is blade:', self.is_blade())
print('Got expansion:', self.own_expansion_slot())
print('Product Name:', self.get_product_name())
print('Platform:', self.device_platform)
print('Chassis:', self.get_chassis())
print('Chassis service tag:', self.get_chassis_service_tag())
print('Service tag:', self.get_service_tag())
        print('NIC:')
        pprint(self.network.get_network_cards())
def own_expansion_slot(self):
"""
Indicates if the device hosts an expansion card
"""
return False
def own_gpu_expansion_slot(self):
"""
Indicates if the device hosts a GPU expansion card
"""
return False
def own_drive_expansion_slot(self):
"""
Indicates if the device hosts a drive expansion bay
"""
return False
| [
[
[
7,
42
],
[
654,
663
],
[
698,
707
],
[
763,
772
],
[
826,
835
],
[
891,
900
]
],
[
[
75,
81
],
[
984,
990
],
[
1158,
1164
],
[
1100,
1106
],
[
1301,
1307
],
[
4842,
4848
],
[
4960,
4966
],
[
5824,
5830
],
[
6590,
6596
],
[
7588,
7594
],
[
8805,
8811
]
],
[
[
114,
135
],
[
1778,
1780
],
[
2174,
2176
],
[
4165,
4167
],
[
6023,
6025
],
[
6990,
6992
],
[
8022,
8024
],
[
8593,
8595
],
[
9233,
9235
],
[
9772,
9774
],
[
9856,
9858
],
[
10473,
10475
],
[
10595,
10597
],
[
12264,
12266
],
[
13946,
13948
],
[
14202,
14204
],
[
14514,
14516
],
[
15507,
15509
]
],
[
[
171,
180
],
[
15142,
15151
],
[
16065,
16074
]
],
[
[
215,
225
],
[
1916,
1926
]
],
[
[
227,
231
],
[
3814,
3818
]
],
[
[
233,
239
],
[
1601,
1607
]
],
[
[
270,
288
],
[
1213,
1231
]
],
[
[
290,
305
],
[
5808,
5823
],
[
6574,
6589
],
[
7572,
7587
],
[
8789,
8804
]
],
[
[
307,
322
],
[
5750,
5765
],
[
6638,
6653
],
[
7636,
7651
],
[
8854,
8869
]
],
[
[
324,
343
],
[
964,
983
]
],
[
[
377,
390
],
[
14846,
14859
],
[
17857,
17870
]
],
[
[
422,
433
],
[
15356,
15367
]
],
[
[
453,
459
],
[
18505,
18511
]
],
[
[
467,
477
],
[
4939,
4949
]
],
[
[
485,
492
],
[
2061,
2068
],
[
2262,
2269
],
[
2641,
2648
],
[
2988,
2995
],
[
4053,
4060
],
[
5908,
5915
],
[
6766,
6773
],
[
7794,
7801
],
[
9096,
9103
],
[
10731,
10738
],
[
11611,
11618
],
[
12404,
12411
],
[
12545,
12552
],
[
14679,
14686
],
[
15649,
15656
],
[
17763,
17770
]
],
[
[
500,
506
],
[
4902,
4908
]
],
[
[
514,
517
],
[
2145,
2148
],
[
2337,
2340
],
[
4137,
4140
]
],
[
[
526,
536
]
]
] |
test = {
'name': 'q3_1_8',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> genre_and_distances.labels == ('Genre', 'Distance')
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> genre_and_distances.num_rows == train_movies.num_rows
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> print(genre_and_distances.group('Genre'))
Genre | count
comedy | 113
thriller | 201
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> np.allclose(genre_and_distances.column('Distance'), sorted(fast_distances(test_my_features.row(0), train_my_features)))
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
[
[
0,
4
]
]
] |
#!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task
@task
def main():
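    # Ask the Legion runtime how many Python processors are available for this run.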
nprocs = pygion.Tunable.select(pygion.Tunable.GLOBAL_PYS).get()
print("Number of Python processors: %s" % nprocs)
if __name__ == '__main__':
main()
| [
[
[
633,
647
]
],
[
[
656,
662
],
[
719,
725
],
[
741,
747
]
],
[
[
682,
686
],
[
689,
693
]
],
[
[
698,
702
],
[
860,
864
]
]
] |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.system
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
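# Import the concrete com.sun.star interface only when running inside a live UNO
# environment; otherwise fall back to the static stub imported below.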
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.system import XSimpleMailMessage as XSimpleMailMessage
setattr(XSimpleMailMessage, '__ooo_ns__', 'com.sun.star.system')
setattr(XSimpleMailMessage, '__ooo_full_ns__', 'com.sun.star.system.XSimpleMailMessage')
setattr(XSimpleMailMessage, '__ooo_type_name__', 'interface')
else:
from ...lo.system.x_simple_mail_message import XSimpleMailMessage as XSimpleMailMessage
__all__ = ['XSimpleMailMessage']
| [
[
[
760,
773
],
[
859,
872
],
[
939,
952
]
],
[
[
805,
820
],
[
894,
909
]
],
[
[
822,
833
],
[
878,
889
]
],
[
[
834,
842
],
[
957,
965
]
],
[
[
915,
923
],
[
957,
965
]
],
[
[
1003,
1043
],
[
1056,
1074
],
[
1125,
1143
],
[
1218,
1236
]
],
[
[
1329,
1369
]
],
[
[
1371,
1378
]
]
] |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
nox.sessions = [
"unit",
"cover",
"mypy",
"check_lower_bounds"
# exclude update_lower_bounds from default
"docs",
]
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
"""Run the unit test suite."""
session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google/cloud/asset_v1p4beta1/',
'--cov-config=.coveragerc',
'--cov-report=term',
'--cov-report=html',
os.path.join('tests', 'unit', ''.join(session.posargs))
)
@nox.session(python='3.7')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=['3.6', '3.7'])
def mypy(session):
"""Run the type checker."""
session.install('mypy', 'types-pkg_resources')
session.install('.')
session.run(
'mypy',
'--explicit-package-bases',
'google',
)
@nox.session
def update_lower_bounds(session):
"""Update lower bounds in constraints.txt to match setup.py"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'update',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session
def check_lower_bounds(session):
"""Check lower bounds in setup.py are reflected in constraints file"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'check',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session(python='3.6')
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| [
[
[
607,
609
],
[
1497,
1499
],
[
3187,
3189
],
[
3445,
3447
],
[
3501,
3503
],
[
3535,
3537
]
],
[
[
617,
624
],
[
718,
725
]
],
[
[
632,
638
],
[
3173,
3179
]
],
[
[
646,
656
],
[
844,
854
]
],
[
[
664,
667
],
[
869,
872
]
],
[
[
677,
680
],
[
928,
931
],
[
1070,
1073
],
[
1562,
1565
],
[
1958,
1961
],
[
2216,
2219
],
[
2597,
2600
],
[
2983,
2986
]
],
[
[
698,
715
],
[
791,
808
]
],
[
[
760,
788
],
[
2557,
2585
],
[
2944,
2972
]
],
[
[
829,
841
],
[
2501,
2513
],
[
2888,
2900
]
],
[
[
1123,
1127
]
],
[
[
1592,
1597
]
],
[
[
1997,
2001
]
],
[
[
2232,
2251
]
],
[
[
2613,
2631
]
],
[
[
3013,
3017
]
]
] |