hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses sequencelengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses sequencelengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses sequencelengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68892c68002cfb1c3d106ba2af595041dfbab2f8 | 4,420 | py | Python | bootwrap/components/anchor.py | aitorres/bootwrap | dd03d2790e3abda8f461cc92203225643b6077f7 | ["MIT"] | null | null | null | bootwrap/components/anchor.py | aitorres/bootwrap | dd03d2790e3abda8f461cc92203225643b6077f7 | ["MIT"] | null | null | null | bootwrap/components/anchor.py | aitorres/bootwrap | dd03d2790e3abda8f461cc92203225643b6077f7 | ["MIT"] | null | null | null |
"""
An anchor.
"""
import warnings
from .base import (
WebComponent,
ClassMixin,
ActionMixin,
AppearanceMixin,
Action
)
from .panel import Panel
from .dialog import Dialog
from .utils import attr, inject
class Anchor(WebComponent, ClassMixin, ActionMixin, AppearanceMixin):
"""A web component for an anchor.
The `Anchor` component is used for creating a hyperlink to pages, files,
email addresses, locations on the same page, or other web-resources
defined by a URL address. The <i>Bootwrap</i> also uses the `Anchor` in
conjunction with other components, for example, in creating a navigation
menu.
Args:
inner (str|WebComponent): The object wrapped by the anchor.
Example:
Anchor('Google Search').link('https://www.google.com/')
Demo:
from bootwrap import Anchor
output = Anchor('Google Search').link('https://www.google.com/')
"""
def __init__(self, inner=None):
super().__init__()
self._inner = inner
self.__role = None
def __str__(self):
name = None
if isinstance(self._inner, WebComponent):
name = self._inner.identifier
if self._category is not None:
self.add_classes(f'text-{self._category}')
if self._action == Action.LINK:
if isinstance(self._target, WebComponent):
href = f'#{self._target.identifier}'
else: # type(target) == str
href = self._target
return f'''
<a {attr("id", self.identifier)}
{attr("class", self.classes)}
{attr("href", href)}
{attr("role", self.__role)}>
{inject(self._inner)}
</a>
'''
elif self._action == Action.TOGGLE:
if isinstance(self._target, Panel):
data_toggle = 'tab'
if self._target.classes is not None:
if 'collapse' in self._target.classes:
data_toggle = 'collapse'
return f'''
<a {attr('id', self.identifier)}
{attr('class', self.classes)}
{attr("href", f'#{self._target.identifier}')}
{attr("data-toggle", data_toggle)}
data-target="#{self._target.identifier}">
{inject(self._inner)}
</a>
'''
elif isinstance(self._target, Dialog):
return f'''
<a {attr("id", self.identifier)}
{attr("class",self.classes)}
{attr("href", f'#{self._target.identifier}')}
{attr("data-toggle", "modal")}
{attr("role", self.__role or 'modal')}>
{inject(self._inner)}
</a>
'''
raise TypeError(
'The toggle operation cannot be applied to the '
f'{type(self._target)} web component;',
)
elif self._action == Action.DISMISS:
return f'''
<a {attr('id', self.identifier)}
{attr('class', self.classes)}
{attr("href", f'#')}
data-dismiss="modal">
{inject(self._inner)}
</a>
'''
elif self._action == Action.SUBMIT:
warnings.warn(
'Avoid using an anchor for performing submit action and use '
'a button instead. If you still decided to use anchor, the '
'rendering script will forcefully replace it with a button.',
category=RuntimeWarning
)
self.add_classes('btn')
return f'''
<button {attr('id', self.identifier)}
{attr('class', self.classes)}
type="submit">
{inject(self._inner)}
</button>
'''
else:
return f'''
<a {attr("id", self.identifier)}
{attr("name", name)}
{attr("class", self.classes)}
{attr("role", self.__role)}>
{inject(self._inner)}
</a>
'''
| 34.263566 | 77 | 0.480769 |
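# Illustrative usage sketch for the Anchor component above (an addition, not part
# of the bootwrap source). It only exercises the .link() call already shown in the
# class docstring; str() triggers the __str__ rendering defined above.
from bootwrap import Anchor

google = Anchor('Google Search').link('https://www.google.com/')
markup = str(google)   # roughly '<a id="..." href="https://www.google.com/">Google Search</a>'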
d3c12121735bfb0f15f057b80f245587a102750d | 6,762 | py | Python | Python/incr.py | Antreasgr/Random-Graphs | b517cd13fd16cf1a28894abde9cdef0398ca0083 | ["MIT"] | null | null | null | Python/incr.py | Antreasgr/Random-Graphs | b517cd13fd16cf1a28894abde9cdef0398ca0083 | ["MIT"] | null | null | null | Python/incr.py | Antreasgr/Random-Graphs | b517cd13fd16cf1a28894abde9cdef0398ca0083 | ["MIT"] | null | null | null |
from MVA import *
from report_generator import *
from math import sqrt, ceil, floor
def split_edges_k(m_parameters, upper_bound, rand, k=1):
"""
INCR Algorithm. Split clique tree edges
"""
dis_set = UnionFind()
[dis_set[i] for i in range(m_parameters.num_maximal_cliques)]
loops = 0
while m_parameters.edges_list and m_parameters.num_edges < upper_bound:
loops += 1
(x, y, sep, omega), index = rand.next_element(m_parameters.edges_list)
i = dis_set[x]
j = dis_set[y]
x_sep, y_sep = m_parameters.cliques[i] - set(sep), m_parameters.cliques[j] - set(sep)
if len(x_sep) == 0 or len(y_sep) == 0:
raise Exception("Not valid clique tree")
elif len(x_sep) <= k or len(y_sep) <= k:
# merge {x,y}
edges_added = len(x_sep) * len(y_sep)
dis_set.union(x, y)
m_parameters.cliques[i].update(m_parameters.cliques[j])
m_parameters.cardinality_array[i] += len(y_sep)
m_parameters.cliques[j] = set()
m_parameters.cardinality_array[j] = 0
m_parameters.num_edges += edges_added
m_parameters.num_maximal_cliques -= 1
# delete old edge
del m_parameters.edges_list[index]
elif len(x_sep) <= k:
# merge {x,z}
ylen = rand.next_random(1, len(y_sep))
y_random = list(y_sep)[0:ylen]
m_parameters.cliques[i].update(y_random)
m_parameters.cardinality_array[i] += ylen
# update the edge min-seperator
m_parameters.edges_list[index] = (x, y, y_random + sep, omega + ylen)
# update num of edges
m_parameters.num_edges += ylen * len(x_sep)
elif len(y_sep) <= k:
# merge {y,z}
xlen = rand.next_random(1, len(x_sep))
x_random = list(x_sep)[0:xlen]
m_parameters.cliques[j].update(x_random)
m_parameters.cardinality_array[j] += xlen
# update the edge min-seperator
m_parameters.edges_list[index] = (x, y, x_random + sep, omega + xlen)
# update num of edges
m_parameters.num_edges += xlen * len(y_sep)
else:
# make new z node
xlen = rand.next_random(1, len(x_sep))
ylen = rand.next_random(1, len(y_sep))
x_random = list(x_sep)[0:xlen]
y_random = list(y_sep)[0:ylen]
z = set(x_random + y_random + sep)
edges_added = xlen * ylen
# print(str(edges_added) + " / " + str(len(x_sep) * len(y_sep)))
# add node to list
m_parameters.cliques.append(z)
m_parameters.cardinality_array.append(len(z))
# add x-z edge
m_parameters.edges_list.append((x, len(m_parameters.cliques) - 1, x_random + sep, omega + edges_added))
# add y-z edge
m_parameters.edges_list.append((y, len(m_parameters.cliques) - 1, y_random + sep, omega + edges_added))
m_parameters.num_maximal_cliques += 1
# update num of edges
m_parameters.num_edges += edges_added
# delete old edge
del m_parameters.edges_list[index]
return loops
def init_k_tree_incr(num_vertices, k, rand):
ct_tree = MVAParameters(0, 0, [], [], [])
ct_tree.cliques.append(set([i for i in range(k + 1)]))
ct_tree.cardinality_array.append(k + 1)
ct_tree.num_maximal_cliques = 1
ct_tree.num_edges = int(k * (k + 1) / 2)
root = ct_tree.cliques[0]
for u in range(1, num_vertices - k):
i = rand.next_random(0, len(ct_tree.cliques))
y = rand.next_random(0, len(ct_tree.cliques[i]))
sep = [x for ii, x in enumerate(ct_tree.cliques[i]) if ii != y]
ct_tree.cliques.append(set(sep + [u + k]))
ct_tree.cardinality_array.append(k + 1)
ct_tree.num_maximal_cliques += 1
ct_tree.num_edges += k
new_node = ct_tree.cliques[-1]
ct_tree.edges_list.append((i, len(ct_tree.cliques) - 1, sep, k))
return ct_tree
def Run_INCR(num_vertices, edge_density, algorithm_name, k, init_tree=None):
"""
Initialize and run the MVA algorithm
"""
edges_bound = int(edge_density * (num_vertices * (num_vertices - 1) / 2))
k = max(1, k * edges_bound)
runner = runner_factory(num_vertices, algorithm_name, None, edges_bound=edges_bound, edge_density=edge_density, k=k)
randomizer = Randomizer(2 * num_vertices, runner["Parameters"]["seed"])
with Timer("t_expand_cliques", runner["Times"]):
if init_tree == "ktree":
ktree_k = 1 / 2 * (2 * num_vertices - 1 - sqrt(((2 * num_vertices - 1) * (2 * num_vertices - 1)) - (8 * edges_bound)))
ktree_k = int(floor(ktree_k))
k_edges = (num_vertices - ktree_k - 1) * ktree_k + (ktree_k * (ktree_k + 1) / 2)
p_mva = init_k_tree_incr(runner["Parameters"]["n"], ktree_k, randomizer)
print("- Init with " + str(ktree_k) + "-tree:")
elif init_tree == "tree":
p_mva = expand_tree(runner["Parameters"]["n"], randomizer)
print("- Expand tree:")
else:
p_mva = expand_cliques(runner["Parameters"]["n"], randomizer)
print("- Expand cliques:")
print(p_mva)
with Timer("t_split_edges", runner["Times"]):
loops = split_edges_k(p_mva, runner["Parameters"]["edges_bound"], randomizer, k)
print("- Split edges:")
runner["Stats"]["total"] = runner["Times"]["t_split_edges"] + runner["Times"]["t_expand_cliques"]
runner["Stats"]["loops%"] = loops / edges_bound
print(" loops:", runner["Stats"]["loops%"])
print(p_mva)
return calculate_mva_statistics(p_mva, runner, randomizer, num_vertices)
NUM_VERTICES = [
50,
100,
500,
1000,
2500,
5000,
10000, # 50000, 100000, 500000, 1000000
]
EDGES_DENSITY = [0.1, 0.33, 0.5, 0.75, 0.99]
NAME = "INCR_KTREE_k_1"
if __name__ == '__main__':
mva_data = []
for num in NUM_VERTICES:
for edge_density in EDGES_DENSITY:
Runners = []
for _ in range(10):
Runners.append(Run_INCR(num, edge_density, NAME, 0, "ktree"))
# filename = "Results/" + NAME + "/Run_{}_{}_{}.yml".format(num, edge_density, datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
# if not os.path.isdir(os.path.dirname(filename)):
# os.makedirs(os.path.dirname(filename))
# with io.open(filename, 'w') as file:
# print_statistics(Runners, file)
print("Done")
mva_data.append(merge_runners(Runners))
    run_reports_data(NAME, mva_data)
| 36.95082 | 136 | 0.587252 |
4909075b90f779efa0cc283e1cf15a85409c64e5 | 1,003 | py | Python | pycodegen/frontend/frontend_cpp/__init__.py | blejdfist/pycodegen | b7a7fad2c9e0a537893e53df0e07544d047e443d | ["MIT"] | 5 | 2019-02-15T16:13:43.000Z | 2021-07-22T02:54:57.000Z | pycodegen/frontend/frontend_cpp/__init__.py | blejdfist/pycodegen | b7a7fad2c9e0a537893e53df0e07544d047e443d | ["MIT"] | 1 | 2019-12-06T20:24:36.000Z | 2020-05-04T18:43:12.000Z | pycodegen/frontend/frontend_cpp/__init__.py | blejdfist/pycodegen | b7a7fad2c9e0a537893e53df0e07544d047e443d | ["MIT"] | null | null | null |
"""C/C++ parser frontend based on libclang"""
import argparse
import logging
import sys
_LOGGER = logging.getLogger(__name__)
def register_arguments(argument_parser):
argument_parser.add_argument("--args", nargs=argparse.REMAINDER,
help="Arguments to pass to clang")
argument_parser.add_argument("--print-ast", action="store_true",
help="Print AST to console")
def run(filename, options=None):
try:
import clang.cindex
except ModuleNotFoundError:
_LOGGER.error("To use the C++ frontend you must have clang>=6.0.0 installed.")
_LOGGER.error("Try installing it using: pip install 'pycodegen[CPP]'")
sys.exit(1)
from .parser_libclang import ParserLibClang
if options is None:
options = {}
parser = ParserLibClang()
if options.get('print_ast'):
print(parser.dump(filename, options.get('args')))
return parser.parse(filename, options.get('args'))
| 27.861111 | 86 | 0.65005 |
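# Illustrative call sketch for the frontend above (an addition, not part of the
# pycodegen source). The module path is inferred from the repository path shown in
# the row metadata, and the header file name is hypothetical; the options keys
# mirror what run() reads via options.get().
from pycodegen.frontend import frontend_cpp

parsed = frontend_cpp.run("example.hpp", {"print_ast": False, "args": ["-std=c++17"]})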
6d5c9fbf5046ce984210e2d15346d3d8601d7b59 | 827 | py | Python | 01_fuzzingCrashingProgram.py | F-Masood/ExploitingBufferOverflows | 9397dc2bc45255a50379cd105e8b4b6242993a4b | ["MIT"] | 3 | 2021-12-09T14:40:49.000Z | 2022-02-22T04:21:07.000Z | 01_fuzzingCrashingProgram.py | F-Masood/Exploiting_StackBased_BufferOverflows | 9397dc2bc45255a50379cd105e8b4b6242993a4b | ["MIT"] | null | null | null | 01_fuzzingCrashingProgram.py | F-Masood/Exploiting_StackBased_BufferOverflows | 9397dc2bc45255a50379cd105e8b4b6242993a4b | ["MIT"] | null | null | null |
#!/usr/bin/python
#once the program crashes in Windows, CLOSE it manually or this PROGRAM may keep running forever
import sys,socket
from time import sleep
buffer = "A"*100
while True:
try:
print ("Sending buffer of length " + str(len(buffer)) )
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.settimeout(2)
res = s.connect_ex(('192.168.10.51',2371))
if res == 0:
print("The Port is open\n")
else:
print("The Port is closed\n")
s.close()
sys.exit()
res = s.connect(('192.168.10.51',2371))
buffer = buffer + "A"*250
s.send((buffer))
s.close()
sleep(2)
except:
print ("Program crashed at %s bytes" %str(len(buffer)))
sys.exit()
| 22.351351 | 96 | 0.540508 |
249b8e65ac9d671f8c4827e93a09a6afc5524620 | 1,731 | py | Python | driver/util.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | ["MIT"] | null | null | null | driver/util.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | ["MIT"] | null | null | null | driver/util.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import re
DRIVER_DIR = os.path.abspath(os.path.dirname(__file__))
REPO_ROOT_DIR = os.path.dirname(DRIVER_DIR)
BUILDS_DIR = os.path.join(REPO_ROOT_DIR, "builds")
def get_elapsed_time():
"""
Return the CPU time taken by the python process and its child
processes.
"""
if os.name == "nt":
# The child time components of os.times() are 0 on Windows. If
# we ever end up using this method on Windows, we need to be
# aware of this, so it's prudent to complain loudly.
raise NotImplementedError("cannot use get_elapsed_time() on Windows")
return sum(os.times()[:4])
def find_domain_filename(task_filename):
"""
Find domain filename for the given task using automatic naming rules.
"""
dirname, basename = os.path.split(task_filename)
domain_basenames = [
"domain.pddl",
basename[:3] + "-domain.pddl",
"domain_" + basename,
]
for domain_basename in domain_basenames:
domain_filename = os.path.join(dirname, domain_basename)
if os.path.exists(domain_filename):
return domain_filename
raise SystemExit(
"Error: Could not find domain file using automatic naming rules.")
# Shell-escaping code taken from Python's shlex.quote (missing in Python < 3.3).
_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
def shell_escape(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# Use single quotes, and put single quotes into double quotes.
# The string $'b is then quoted as '$'"'"'b'.
return "'" + s.replace("'", "'\"'\"'") + "'"
| 29.844828 | 80 | 0.636626 |
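# Illustrative usage sketch for the helpers above (an addition, not part of the
# downward driver source; the file paths are hypothetical).
from driver import util

safe = util.shell_escape("file with spaces.pddl")      # wrapped in single quotes because of the spaces
# find_domain_filename() tries domain.pddl, <prefix>-domain.pddl and domain_<task>
# next to the task file and raises SystemExit if none of them exists.
domain = util.find_domain_filename("benchmarks/gripper/prob01.pddl")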
9890630bc1932c400ebb2a3a7f8829eb14db4351 | 1,747 | py | Python | src/mmxnet/mdropout.py | mumupy/mmdeeplearning | fb10a652197556d76ff025c631afa5645ca2cf24 | ["Apache-2.0"] | 9 | 2019-10-25T03:50:02.000Z | 2022-03-22T13:22:11.000Z | src/mmxnet/mdropout.py | mumupy/mmdeeplearning | fb10a652197556d76ff025c631afa5645ca2cf24 | ["Apache-2.0"] | 11 | 2019-12-11T14:47:54.000Z | 2022-02-10T00:23:57.000Z | src/mmxnet/mdropout.py | mumupy/mmdeeplearning | fb10a652197556d76ff025c631afa5645ca2cf24 | ["Apache-2.0"] | 1 | 2021-04-20T07:30:42.000Z | 2021-04-20T07:30:42.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/26 18:12
# @Author : ganliang
# @File : mdropout.py
# @Desc : dropout, a remedy for overfitting
import d2lzh as d2l
from mxnet import nd, gluon, autograd, init
from mxnet.gluon import nn, loss as gloss
from src.config import logger
def dropout(X, drop_rate):
"""
丢弃X数据集中的rate比例数据
:param X: 数据集合
:param drop_rate: 丢弃比例
:return:
"""
assert 0 <= drop_rate <= 1
logger.info("丢弃之前的数据:\n%s" % X)
keep_rate = 1 - drop_rate
if keep_rate == 0:
return nd.zeros_like(X)
drop_x = nd.random.uniform(0, 1, shape=X.shape)
logger.info("丢弃随机数据:%s" % drop_x)
mask = drop_x < keep_rate
logger.info("丢弃之后数据:\n%s" % mask)
return mask * X
def dropout2(X, drop_rate):
autograd.set_training(True)
Z = nd.zeros_like(X)
nd.Dropout(X, p=drop_rate, out=Z)
return Z
def dropout_gluon():
drop_prob1, drop_prob2, lr, batch_size, num_epochs = 0.2, 0.5, 0.1, 64, 50
net = nn.Sequential()
net.add(nn.Dense(256, activation="relu"),
            nn.Dropout(drop_prob1),  # add a dropout layer after the first fully-connected layer
            nn.Dense(256, activation="relu"),
            nn.Dropout(drop_prob2),  # add a dropout layer after the second fully-connected layer
nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
loss = gloss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
None, trainer)
if __name__ == "__main__":
x = nd.arange(64).reshape(8, 8)
logger.info(dropout(x, 0.5))
logger.info(dropout2(x, 0.5) / 2)
dropout_gluon()
| 25.318841 | 81 | 0.627361 |
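# Quick behavioural check of the dropout() helper above (an illustrative addition,
# not part of the mmdeeplearning source; assumes dropout() from the module above is
# in scope). With drop_rate=1.0 the keep_rate==0 branch returns all zeros; with
# drop_rate=0.5 roughly half of the entries survive, unscaled.
from mxnet import nd

X = nd.ones((2, 4))
half_dropped = dropout(X, 0.5)   # surviving entries stay 1, dropped entries become 0
all_dropped = dropout(X, 1.0)    # identical to nd.zeros_like(X)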
86c82d6209e78988ca418a2a60f25cc300aff88f | 860 | py | Python | py-drone-vision/ardupilot.py | Witherlord/quadcopter-matlab | 09f922e10919a69fd4751f8b257296b2ab2bdd64 | ["MIT"] | 2 | 2021-01-14T09:45:43.000Z | 2021-10-15T03:30:41.000Z | py-drone-vision/ardupilot.py | Witherlord/quadcopter-matlab | 09f922e10919a69fd4751f8b257296b2ab2bdd64 | ["MIT"] | null | null | null | py-drone-vision/ardupilot.py | Witherlord/quadcopter-matlab | 09f922e10919a69fd4751f8b257296b2ab2bdd64 | ["MIT"] | 1 | 2021-01-14T09:45:40.000Z | 2021-01-14T09:45:40.000Z |
import cv2
import numpy as np
import os
import RPi.GPIO as gpio
import time
import pigpio
pi = pigpio.pi()
gpio.setmode(gpio.BCM)
pin_x_l = 12
pin_x_r = 16
pin_y_up = 20
pin_y_d = 21
freq_y = 50
gpio.setup(pin_x_l, gpio.IN)
gpio.setup(pin_x_r, gpio.IN)
gpio.setup(pin_y_up, gpio.OUT)
gpio.setup(pin_y_d, gpio.OUT)
pwmObject_y_up = gpio.PWM(pin_y_up, freq_y)
pwmObject_y_d = gpio.PWM(pin_y_d, freq_y)
pwmObject_y_up.start(0)
pwmObject_y_d.start(0)
while True:
cb1 = pi.callback(pin_x_l, pigpio.RISING_EDGE)
CT = time.time()
pi.wait_for_edge(pin_x_l, pigpio.FALLING_EDGE)
CT_HIGH = time.time() - CT
# pi.wait_for_edge(pin_x_l, pigpio.RISING_EDGE)
# CT_LOW = time.time() - CT_HIGH - CT
# time.sleep(CT_HIGT)
#cb1 = pi.callback(pin_x_l, pigpio.RISING_EDGE)
# FULL = CT_HIGH + CT_LOW
#
# DC = (CT_HIGH/FULL)*100
print("DC=" + str(CT_HIGH))
| 16.862745 | 48 | 0.723256 |
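# Sketch of the duty-cycle calculation hinted at by the commented-out lines above
# (an illustrative addition, not part of the quadcopter-matlab source): once both
# the high and low pulse times are measured, DC = CT_HIGH / (CT_HIGH + CT_LOW) * 100.
def duty_cycle_percent(ct_high, ct_low):
    full = ct_high + ct_low
    return (ct_high / full) * 100.0 if full else 0.0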
129b5c767d6f1ac9effb117747784d11c708a595 | 3,432 | py | Python | src/core/uv_edit/data/poly.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | ["BSD-3-Clause"] | 63 | 2016-01-02T16:28:47.000Z | 2022-01-19T11:29:51.000Z | src/core/uv_edit/data/poly.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | ["BSD-3-Clause"] | 12 | 2016-06-12T14:14:15.000Z | 2020-12-18T16:11:45.000Z | src/core/uv_edit/data/poly.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | ["BSD-3-Clause"] | 17 | 2016-05-23T00:02:27.000Z | 2021-04-25T17:47:27.000Z |
from ..base import *
class Polygon:
__slots__ = ("type", "id", "picking_color_id", "vertex_ids", "edge_ids",
"uv_data_obj", "_tri_data", "_center_pos")
def __init__(self, poly_id, picking_col_id, uv_data_obj=None, triangle_data=None,
vert_ids=None, edge_ids=None, data_copy=None):
self.type = "poly"
self.id = poly_id
self.picking_color_id = picking_col_id
self.uv_data_obj = uv_data_obj
if data_copy:
center_pos = data_copy["center_pos"]
triangle_data = data_copy["tri_data"]
vert_ids = data_copy["vert_ids"]
edge_ids = data_copy["edge_ids"]
else:
center_pos = Point3()
self._center_pos = center_pos
self._tri_data = triangle_data
self.vertex_ids = vert_ids
self.edge_ids = edge_ids
def copy(self):
data_copy = {}
data_copy["center_pos"] = self._center_pos
data_copy["tri_data"] = self._tri_data[:]
data_copy["vert_ids"] = self.vertex_ids[:]
data_copy["edge_ids"] = self.edge_ids[:]
poly = Polygon(self.id, self.picking_color_id, data_copy=data_copy)
return poly
def __getitem__(self, index):
try:
return self._tri_data[index]
except IndexError:
raise IndexError("Index out of range.")
except TypeError:
raise TypeError("Index must be an integer value.")
def __len__(self):
"""
Return the size of the polygon corresponding to the number of data rows of
the associated GeomTriangles object.
"""
return len(self._tri_data) * 3
@property
def merged_subobj(self):
return self
@property
def neighbor_ids(self):
merged_verts = self.uv_data_obj.merged_verts
neighbor_ids = set()
for vert_id in self.vertex_ids:
neighbor_ids.update(merged_verts[vert_id].polygon_ids)
neighbor_ids.remove(self.id)
return neighbor_ids
@property
def vertices(self):
verts = self.uv_data_obj.get_subobjects("vert")
return [verts[vert_id] for vert_id in self.vertex_ids]
@property
def edges(self):
edges = self.uv_data_obj.get_subobjects("edge")
return [edges[edge_id] for edge_id in self.edge_ids]
@property
def vertex_count(self):
return len(self.vertex_ids)
@property
def row_indices(self):
verts = self.uv_data_obj.get_subobjects("vert")
return [verts[vert_id].row_index for vert_id in self.vertex_ids]
@property
def special_selection(self):
polys = [self]
if GD["uv_edit_options"]["sel_polys_by_cluster"]:
polys = self.uv_data_obj.get_polygon_cluster(self.id)
return polys
def get_center_pos(self, ref_node=None):
if ref_node:
origin = self.uv_data_obj.origin
return ref_node.get_relative_point(origin, self._center_pos)
return self._center_pos
@property
def center_pos(self):
return self.get_center_pos()
@center_pos.setter
def center_pos(self, center_pos):
self._center_pos = center_pos
def update_center_pos(self):
verts = self.vertices
positions = [vert.get_pos() for vert in verts]
self._center_pos = sum(positions, Point3()) / len(positions)
| 25.235294 | 85 | 0.624126 |
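# Illustrative construction sketch for the Polygon class above (an addition, not
# part of the panda3dstudio source). Only the data_copy branch of __init__ is used,
# which needs no uv_data_obj; Point3 is assumed to come from Panda3D's core module.
from panda3d.core import Point3

data = {
    "center_pos": Point3(0.0, 0.0, 0.0),
    "tri_data": [(0, 1, 2), (0, 2, 3)],   # two triangles, so len(poly) == 6
    "vert_ids": [0, 1, 2, 3],
    "edge_ids": [0, 1, 2, 3],
}
poly = Polygon(poly_id=1, picking_col_id=42, data_copy=data)
clone = poly.copy()   # copies the ids, triangle data and centre position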
1e4c732567fb6381aca4916c6ab6f32d44ff63a1 | 27 | py | Python | ven2/lib/python2.7/site-packages/zope/i18n/locales/tests/__init__.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | ["Apache-2.0"] | 1 | 2021-09-15T20:22:39.000Z | 2021-09-15T20:22:39.000Z | ven2/lib/python2.7/site-packages/zope/i18n/locales/tests/__init__.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | ["Apache-2.0"] | 43 | 2015-01-28T21:04:24.000Z | 2021-12-09T07:33:35.000Z | ven2/lib/python2.7/site-packages/zope/i18n/locales/tests/__init__.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | ["Apache-2.0"] | 7 | 2015-04-03T09:00:54.000Z | 2020-05-14T14:16:25.000Z |
# Test package for locales
| 13.5 | 26 | 0.777778 |
d78c845fe8798f4e959a7e55e23c1b9d3a7e6eef | 7,575 | py | Python | lists.py | alexhalme/signover | c2f65a046d54a419187a99c7d4b6faaaa38caec8 | ["MIT"] | null | null | null | lists.py | alexhalme/signover | c2f65a046d54a419187a99c7d4b6faaaa38caec8 | ["MIT"] | null | null | null | lists.py | alexhalme/signover | c2f65a046d54a419187a99c7d4b6faaaa38caec8 | ["MIT"] | null | null | null |
import _clibash
import locaf as af
from locaf import En
import wsql
import gv
import re
import tf
from cryptoAES import CryW
from crypto25519 import Crypt25519
import time
import json
import zlib
# 0 - disabled, 1 - superuser, 2 - admin, 3 - regular, 4 - view only
PRIVS = {
0: {0:[], 1:[], 2:[], 3:[], 4:[]},
1: {0:[0, 1, 2, 3, 4], 1:[], 2:[0, 1, 2, 3, 4], 3:[0, 1, 2, 3, 4], 4:[0, 1, 2, 3, 4]},
2: {0:[0, 2, 3, 4], 1:[], 2:[], 3:[0, 2, 3, 4], 4:[0, 2, 3, 4]},
3: {0:[], 1:[], 2:[], 3:[], 4:[]},
4: {0:[], 1:[], 2:[], 3:[], 4:[]}
}
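# How the table above is read by shareList() further down (annotation, not part of
# the original source): PRIVS[actor_priv][target_current_priv] lists the new
# privilege values the acting user may assign to that target. Only superusers (1)
# and admins (2) have non-empty rows, and the column for targets with priv 1 is
# always empty, so nobody can change a superuser's rights.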
class SOList:
def __init__(self, auth, luid):
self.auth, self.luid = auth, luid
if not luid:
self.createList()
self.auth.sql.reconnect()
self.reload()
def reload(self):
self.auth.sql.reconnect()
rights = self.auth.sql.getDataDicts('rights', where = f"luid = '{self.luid}' AND uuid = '{self.auth.userDict['uuid']}'")
self.rights = rights[0] if rights else None
solst = self.auth.sql.getOneDataDict('lists', 'luid', self.luid)
self.active = solst['active']
if not solst:
self.active = self.rights = self.dat = self.aes = None
return None
self.aes = self.auth.keyring.decrypt(self.rights['aes'])
if not self.aes:
self.active = self.rights = self.dat = self.aes = None
return None
self.dat = tf.bj(solst['dat'], self.aes)
if not self.dat:
self.active = self.rights = self.dat = self.aes = None
return None
def selection(self, select = True):
self.auth.sql.wesc('UPDATE rights SET disp* WHERE uuid* AND luid*', v = [(select, self.auth.userDict['uuid'], self.luid)])
return True
def vueList(self, summary = False):
retval = {
'admin': self.rights['priv'],
'list': {
'luid': self.luid,
'dat': self.dat,
'active': self.active
}
}
if summary:
return retval
return {**retval,
'patients': self.vuePatients(),
'rights': self.vueRights()
}
def vueRights(self):
if not self.rights['priv'] in [1, 2]:
return []
query = self.auth.sql.fetch(
f"SELECT uuid, email, dat, ("
f" SELECT priv FROM rights WHERE luid = '{self.luid}' AND rights.uuid = users.uuid"
f") FROM users WHERE uuid in ("
f" SELECT uuid FROM rights WHERE luid = '{self.luid}' AND priv != 0"
f") AND !ISNULL(priv)"
)
retval = [dict(zip(['uuid', 'email', 'dat', 'priv'], x)) for x in query]
return [{**x, 'dat': tf.bj(x['dat'])} for x in retval]
def vuePatients(self):
return []
def updateList(self, newDat = None, newActive = None):
if newDat:
# remove if requested
newDat['cols'] = [col for col in newDat['cols'] if (True if not 'wipe' in col else not col['wipe'])]
# inactive at the end
newDat['cols'] = af.flst( [[colA for colA in newDat['cols'] if colA['active']], [colI for colI in newDat['cols'] if not colI['active']]])
self.auth.sql.replaceRows('lists', {
'luid': self.luid,
'dat': tf.jb(newDat if newDat else self.dat, self.aes),
'active': newActive if newActive else self.active
})
self.reload()
def createList(self):
self.luid = tf.getUUID()
self.aes = En()._rnd(32)
self.shareList(1, uuids = [self.auth.userDict['uuid']], first = True)
self.updateList(
newDat = {
'name': f"(nameless list)",
'cols': []
},
newActive = 1
)
# add empty col
self.updateCol()
# check if one list displayed else display this one
if not self.auth.sql.fetch(f"SELECT luid FROM lists WHERE luid IN (SELECT luid FROM rights WHERE uuid = '{self.auth.userDict['uuid']}' AND disp = 1) AND active = 1"):
self.auth.sql.wesc('UPDATE rights SET disp* WHERE uuid* AND luid*', v = [(True, self.auth.userDict['uuid'], self.luid)])
def updateCol(self, dat = None):
# new col requested
newDat = {k: v for k, v in self.dat.items()}
if not dat:
newDat['cols'].append({'type': 0, 'title': '(none)', 'active': True, 'width': 200, 'cuid': tf.getUUID()})
else:
newDat['cols'][af.kmap(newDat['cols'], 'cuid').index(dat['cuid'])].update(dat)
self.updateList(newDat = newDat)
return True
def unitAction(self, action, dat):
what = action.split('-')[1]
newDat = {k: v for k, v in self.dat.items()}
colIndex = af.kmap(newDat['cols'], 'cuid').index(dat['cuid'])
if what in ['keyboard_arrow_up', 'keyboard_arrow_down']:
colCopy = newDat['cols'][colIndex]
newDat['cols'].pop(colIndex)
newIndex = colIndex + {'keyboard_arrow_up': -1, 'keyboard_arrow_down': 1}.get(what)
if not len(newDat['cols']) >= newIndex > -1:
return False
newDat['cols'].insert(newIndex, colCopy)
if what in ['visibility', 'visibility_off']:
newDat['cols'][colIndex]['active'] = {'visibility_off': True, 'visibility': False}.get(what)
self.updateList(newDat = newDat)
def deleteRights(self, uluids):
self.auth.sql.deleteCond('rights', condition = ' OR '.join([f"(uuid = '{uuid}' AND luid = '{luid}')" for uuid, luid in af.iwList(uluids)]))
def shareList(self, priv, emails = None, uuids = None, first = False):
if False if first else not self.rights.get('priv'):
return False
if not priv in [0, 1, 2, 3, 4]:
return False
if emails:
wheres = ' OR '.join([f"email = '{email}'" for email in af.iwList(emails)])
if not wheres:
return False
query = self.auth.sql.fetch(f"SELECT uuid FROM users WHERE {wheres}")
if not query:
return False
uuids = [x[0] for x in query]
else:
if not uuids:
return False
uuids = af.iwList(uuids)
if self.auth.userDict['uuid'] in uuids and (True if first else priv <= self.rights.get('priv')):
self.auth.sql.wesc(
f"DELETE FROM rights WHERE uuid* AND luid*",
v = (self.auth.userDict['uuid'], self.luid)
)
rightsDict = {
'luid': self.luid,
'uuid': self.auth.userDict['uuid'],
'priv': priv,
'aes': self.auth.keyring.encrypt(self.aes, forceSealedBox = True)
}
# TODO: this?
self.auth.sql.wesc(f"INSERT INTO rights **", d = rightsDict)
if first:
return uuids
# possible uuids ie where emails match and not self user's uuid
uuidsPossible = list(set(uuids) - {self.auth.userDict['uuid']})
allowedPrivs = PRIVS.get(self.rights.get('priv'))
# where statement for privileges when priv would be allowed
uuidsAllowedWheres = ' OR '.join([f"priv = {k}" for k, v in allowedPrivs.items() if priv in v])
# uuids in rights table for this list where priv prevents Δ
uuidsProhibited = self.auth.sql.getOneCol('rights', 'uuid', where = f"luid = '{self.luid}' AND NOT ({uuidsAllowedWheres})")
# to-do list ie possible minus prohibited
uuidsToDo = list(set(uuidsPossible) - set(uuidsProhibited))
if not uuidsToDo:
return []
self.deleteRights([(uuid, self.luid) for uuid in uuidsToDo])
self.auth.sql.replaceRows('rights', [{
'uuid': uuid,
'luid': self.luid,
'priv': priv,
'aes': self.asymEncrypt(uuid, self.aes)
} for uuid in uuidsToDo])
return uuidsToDo
def asymEncrypt(self, uuid, blob):
return self._asymEncrypt(self.auth.sql, uuid, blob)
@classmethod
def _asymEncrypt(cls, sql, uuid, blob):
query = sql.fetch(f"SELECT pub FROM users WHERE uuid = '{uuid}'")
if not query:
return False
return Crypt25519(publicBytes = query[0][0]).encrypt(blob)
| 27.95203 | 170 | 0.598812 |
60559b05417d1ed416f0716d2eec604928c6e65d | 6,641 | py | Python | tips/tips.py | flaree/flare-cogs | b6053d9db9e456a30d48e0b90dd05f229299a8f6 | ["MIT"] | 38 | 2021-03-07T17:13:10.000Z | 2022-02-28T19:50:00.000Z | tips/tips.py | Ghalban/flare-cogs | aa535d3c898e67a52a1fa43b98d943cc7c0bcf51 | ["MIT"] | 44 | 2021-03-12T19:13:32.000Z | 2022-03-18T10:20:52.000Z | tips/tips.py | Ghalban/flare-cogs | aa535d3c898e67a52a1fa43b98d943cc7c0bcf51 | ["MIT"] | 33 | 2021-03-08T18:59:59.000Z | 2022-03-23T10:57:46.000Z |
import functools
import random
import discord
from redbot.core import commands
from redbot.core.config import Config
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
real_send = commands.Context.send
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
# from: https://docs.python.org/3/library/stdtypes.html#str.format_map
class Default(dict):
"""Used with str.format_map to avoid KeyErrors on bad tips."""
def __missing__(self, key):
# returns missing keys as '{key}'
return f"{{{key}}}"
# Thanks Jack for smileysend
@functools.wraps(real_send)
async def send(self, content=None, **kwargs):
content = str(content) if content is not None else None
cog = self.bot.get_cog("Tips")
if (cog).usercache.get(self.author.id, {}).get("toggle", True) and random.randint(
1, cog.chance
) == 1:
tips = cog.message_cache or ["No tips configured."]
tip_msg = random.choice(tips).replace("{prefix}", self.clean_prefix)
new_content = cog.tip_format.format_map(
Default(
content=content or "",
tip_msg=tip_msg,
prefix=self.clean_prefix,
)
)
if len(new_content) <= 2000:
content = new_content
return await real_send(self, content, **kwargs)
class Tips(commands.Cog):
"""Tips - Credit to Jackenmen"""
__version__ = "0.0.2"
def format_help_for_context(self, ctx):
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
def __init__(self, bot) -> None:
self.bot = bot
self.config = Config.get_conf(self, 176070082584248320, force_registration=True)
self.config.register_global(
tips=["Add tips by using `{prefix}tips add-tip`."],
chance=50,
tip_format="{tip_msg}\nYou can turn these tips off by typing `{prefix}tips off`\n\n{content}",
)
self.config.register_user(toggle=True)
async def initialize(self) -> None:
setattr(commands.Context, "send", send)
await self.generate_cache()
async def generate_cache(self):
self.usercache = await self.config.all_users()
self.message_cache = await self.config.tips()
self.chance = await self.config.chance()
self.tip_format = await self.config.tip_format()
def cog_unload(self) -> None:
setattr(commands.Context, "send", real_send)
@commands.group(invoke_without_command=True)
async def tips(self, ctx: commands.Context, toggle: bool) -> None:
"""
Toggle and setup tips.
Run `[p]tips off` to disable tips.
"""
await self.config.user(ctx.author).toggle.set(toggle)
await ctx.tick()
await self.generate_cache()
@commands.is_owner()
@tips.command()
async def chance(self, ctx, chance: int):
"""
Chance for a tip to show.
Default is 50
"""
if chance <= 1:
return await ctx.send("Chance must be greater than 1")
await self.config.chance.set(chance)
await self.generate_cache()
await ctx.tick()
@commands.is_owner()
@tips.command(name="add-tip", aliases=["add", "addtip", "create"])
async def add_tip(self, ctx, *, tip: str):
"""
Add a tip message.
Append `{prefix}` to have it formatted with prefix on send.
"""
async with self.config.tips() as replies:
if tip in replies:
return await ctx.send("That is already a response.")
replies.append(tip)
ind = replies.index(tip)
await ctx.send("Your tip has been added and is tip ID #{}".format(ind))
await self.generate_cache()
@commands.is_owner()
@tips.command(name="del-tip", aliases=["del", "deltip", "delete"])
async def del_tips(self, ctx, *, id: int):
"""Delete a custom tip."""
async with self.config.tips() as replies:
if not replies:
return await ctx.send("No custom tips are configured.")
if id > len(replies):
return await ctx.send("Invalid ID.")
replies.pop(id)
await ctx.send("Your tip has been removed")
await self.generate_cache()
@commands.is_owner()
@tips.command(name="list-tips", aliases=["list", "listtips"])
async def list_tips(
self,
ctx,
):
"""List custom tips."""
async with self.config.tips() as replies:
if not replies:
return await ctx.send("No tips have been configured.")
a = chunks(replies, 10)
embeds = []
i = 0
for item in a:
items = []
for strings in item:
items.append(f"**Reply {i}**: {strings}")
i += 1
embed = discord.Embed(
colour=await self.bot.get_embed_colour(ctx.channel),
description="\n".join(items),
)
embeds.append(embed)
if len(embeds) == 1:
await ctx.send(embed=embeds[0])
else:
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.is_owner()
@tips.command(name="format")
async def format(
self,
ctx: commands.Context,
*,
formatting=None,
):
"""
Set the format for tip messages.
Variables:
`tip_msg` - the tip
`content` - the original message content
`prefix` - the invocation prefix
Default value:
`{tip_msg}\\nYou can turn these tips off by typing `{prefix}tips off`\\n\\n{content}`
"""
if formatting:
await self.config.tip_format.set(formatting)
await ctx.channel.send(
f"The tip format has been set to:\n{formatting}"
) # intentionally uses ctx.channel to avoid tips being triggered
else:
await self.config.tip_format.clear()
await ctx.channel.send("The tip format has been reset to the default.")
await self.generate_cache()
content = "This is example content of a message with a tip."
tips = self.message_cache or ["No tips configured."]
tip_msg = random.choice(tips).format(prefix=ctx.clean_prefix)
await ctx.channel.send(
self.tip_format.format(content=content, tip_msg=tip_msg, prefix=ctx.clean_prefix)
)
| 33.540404 | 106 | 0.584249 |
e192e2b546e587bd8e511db094a896636c111882 | 506 | py | Python | tests/test_fixtures.py | NuGrid/NuGridPy | 35cab0e7fa5565fbb2f99917715a51e3658dd701 | ["BSD-3-Clause"] | 16 | 2016-08-05T18:37:11.000Z | 2021-08-06T20:01:34.000Z | tests/test_fixtures.py | NuGrid/NuGridPy | 35cab0e7fa5565fbb2f99917715a51e3658dd701 | ["BSD-3-Clause"] | 59 | 2016-05-19T19:17:41.000Z | 2019-12-12T22:54:05.000Z | tests/test_fixtures.py | NuGrid/NuGridPy | 35cab0e7fa5565fbb2f99917715a51e3658dd701 | ["BSD-3-Clause"] | 11 | 2016-05-16T22:37:36.000Z | 2019-07-28T07:33:25.000Z |
import unittest
from .fixtures import random_string
class TestFixtures(unittest.TestCase):
"""Class testing the implemented fixtures."""
def test_random_string(self):
"""Test the random_string function."""
num_strings = 1000
string_collection = [
random_string() for _ in range(num_strings)
]
# Make a set to remove duplicates
string_collection = set(string_collection)
self.assertEqual(len(string_collection), num_strings)
| 25.3 | 61 | 0.673913 |
7e6795d5e962f556b9f8ecb269f6d9908190a13a | 9,759 | py | Python | implementation/server/cms/models.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | ["Apache-2.0"] | null | null | null | implementation/server/cms/models.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | ["Apache-2.0"] | null | null | null | implementation/server/cms/models.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | ["Apache-2.0"] | 3 | 2018-10-01T12:04:36.000Z | 2021-01-07T09:30:50.000Z |
from django.db import models
from wagtail.admin.edit_handlers import (
FieldPanel,
FieldRowPanel,
InlinePanel,
MultiFieldPanel,
PageChooserPanel,
StreamFieldPanel
)
from wagtail.core.models import Orderable, Page
from wagtail.core.fields import RichTextField
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.core.fields import RichTextField, StreamField
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.contrib.forms.models import AbstractEmailForm, AbstractFormField
from wagtailtrans.models import TranslatablePage
from wagtail.search import index
from .blocks import BaseStreamBlock
class TranslatablePageMixin(models.Model):
# One link for each alternative language
# These should only be used on the main language page (english)
dutch_link = models.ForeignKey(Page, null=True, on_delete=models.SET_NULL, blank=True, related_name='+')
def get_language(self):
"""
This returns the language code for this page.
"""
# Look through ancestors of this page for its language homepage
# The language homepage is located at depth 3
language_homepage = self.get_ancestors(inclusive=True).get(depth=3)
# The slug of language homepages should always be set to the language code
return language_homepage.slug
# Method to find the main language version of this page
# This works by reversing the above links
def english_page(self):
"""
This finds the english version of this page
"""
language = self.get_language()
if language == 'en':
return self
elif language == 'nl':
return type(self).objects.filter(dutch_link=self).first().specific
# We need a method to find a version of this page for each alternative language.
# These all work the same way. They firstly find the main version of the page
# (english), then from there they can just follow the link to the correct page.
    def dutch_page(self):
        """
        This finds the dutch version of this page
        """
        english_page = self.english_page()
        if english_page and english_page.dutch_link:
            return english_page.dutch_link.specific
class Meta:
abstract = True
class HomePageBlock(models.Model):
background_color = models.CharField(max_length=255)
title = models.CharField(max_length=255)
body = models.CharField(max_length=255)
panels = [
FieldPanel('background_color'),
FieldPanel('title'),
FieldPanel('body'),
]
class Meta:
abstract = True
class HomePageBlockLink(Orderable, HomePageBlock):
page = ParentalKey('cms.HomePage', on_delete=models.CASCADE, related_name='homepage_blocks')
class HomePage(TranslatablePage, Page):
template="cms/base/home_page.html"
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Homepage image'
)
hero_text = RichTextField()
hero_cta = models.CharField(
verbose_name='Hero CTA',
max_length=255,
help_text='Text to display on Call to Action',
blank=True
)
hero_cta_link = models.CharField(
verbose_name='Hero Link',
max_length=255,
help_text='Hero Link',
blank=True
)
sub_hero_block = RichTextField(blank=True)
body = StreamField(
BaseStreamBlock(), verbose_name="Home content block", blank=True
)
content_panels = Page.content_panels + [
MultiFieldPanel([
ImageChooserPanel('image'),
FieldPanel('hero_text', classname="full"),
MultiFieldPanel([
FieldPanel('hero_cta'),
FieldPanel('hero_cta_link'),
FieldPanel('sub_hero_block'),
])
], heading="Hero section"),
StreamFieldPanel('body'),
]
def __str__(self):
return self.title
class Meta:
verbose_name_plural = 'Home'
class ContentPage(TranslatablePage, Page):
body = RichTextField(blank=True)
template="cms/base/content_page.html"
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Header image'
)
body = StreamField(
BaseStreamBlock(), verbose_name="Page body", blank=True
)
content_panels = Page.content_panels + [
ImageChooserPanel('image'),
StreamFieldPanel('body'),
]
@register_snippet
class FooterText(models.Model):
"""
This provides editable text for the site footer. Again it uses the decorator
`register_snippet` to allow it to be accessible via the admin. It is made
accessible on the template via a template tag defined in base/templatetags/
navigation_tags.py
"""
body = RichTextField()
panels = [
FieldPanel('body'),
]
def __str__(self):
return "Footer text"
class Meta:
verbose_name_plural = 'Footer Text'
@register_snippet
class CookieTextEN(models.Model):
"""
This provides editable text for the site footer. Again it uses the decorator
`register_snippet` to allow it to be accessible via the admin. It is made
accessible on the template via a template tag defined in base/templatetags/
navigation_tags.py
"""
body = RichTextField()
panels = [
FieldPanel('body'),
]
def __str__(self):
return "Cookie text EN"
class Meta:
verbose_name_plural = 'Cookie text EN'
@register_snippet
class CookieTextNL(models.Model):
"""
This provides editable text for the site footer. Again it uses the decorator
`register_snippet` to allow it to be accessible via the admin. It is made
accessible on the template via a template tag defined in base/templatetags/
navigation_tags.py
"""
body = RichTextField()
panels = [
FieldPanel('body'),
]
def __str__(self):
return "Cookie text NL"
class Meta:
verbose_name_plural = 'Cookie text NL'
class FormField(AbstractFormField):
"""
Wagtailforms is a module to introduce simple forms on a Wagtail site. It
isn't intended as a replacement to Django's form support but as a quick way
to generate a general purpose data-collection form or contact form
without having to write code. We use it on the site for a contact form. You
can read more about Wagtail forms at:
http://docs.wagtail.io/en/latest/reference/contrib/forms/index.html
"""
page = ParentalKey('FormPage', related_name='form_fields', on_delete=models.CASCADE)
class FormPage(TranslatablePage, AbstractEmailForm):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
template="cms/base/form_page.html"
body = StreamField(BaseStreamBlock())
thank_you_text = RichTextField(blank=True)
# Note how we include the FormField object via an InlinePanel using the
# related_name value
content_panels = AbstractEmailForm.content_panels + [
ImageChooserPanel('image'),
StreamFieldPanel('body'),
InlinePanel('form_fields', label="Form fields"),
FieldPanel('thank_you_text', classname="full"),
MultiFieldPanel([
FieldRowPanel([
FieldPanel('from_address', classname="col6"),
FieldPanel('to_address', classname="col6"),
]),
FieldPanel('subject'),
], "Email"),
]
class ExplorePage(Page):
"""
Detail view for a specific Explore options
"""
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Header image'
)
desciption = RichTextField(blank=True)
content_panels = Page.content_panels + [
ImageChooserPanel('image'),
FieldPanel('desciption'),
]
parent_page_types = ['ExploreIndexPage']
class ExploreIndexPage(Page):
"""
Index page for explore options.
This is more complex than other index pages on the bakery demo site as we've
included pagination. We've separated the different aspects of the index page
to be discrete functions to make it easier to follow
"""
# Can only have BreadPage children
subpage_types = ['ExplorePage']
def children(self):
return self.get_children().specific().live()
class PaymenOptionPage(Page):
"""
Detail view for a specific Explore options
"""
options = RichTextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel('options'),
]
parent_page_types = ['PaymenOptionIndexPage']
class PaymenOptionIndexPage(Page):
"""
Index page for payment options.
This is more complex than other index pages on the bakery demo site as we've
included pagination. We've separated the different aspects of the index page
to be discrete functions to make it easier to follow
"""
classname = models.CharField(
max_length=255,
help_text='classname',
blank=True
)
# Can only have BreadPage children
subpage_types = ['PaymenOptionPage']
def children(self):
return self.get_children().specific().live()
| 28.286957 | 108 | 0.66226 |
1fb119595024bda953ec3d32cfcc527dc55a10b9 | 550 | py | Python | units/length/nautical_miles.py | putridparrot/PyUnits | 4f1095c6fc0bee6ba936921c391913dbefd9307c | ["MIT"] | null | null | null | units/length/nautical_miles.py | putridparrot/PyUnits | 4f1095c6fc0bee6ba936921c391913dbefd9307c | ["MIT"] | null | null | null | units/length/nautical_miles.py | putridparrot/PyUnits | 4f1095c6fc0bee6ba936921c391913dbefd9307c | ["MIT"] | null | null | null |
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
def to_millimetres(value):
return value * 1852000.0
def to_centimetres(value):
return value * 185200.0
def to_metres(value):
return value * 1852.0
def to_kilometres(value):
return value * 1.85200
def to_inches(value):
return value * 72913.4
def to_feet(value):
return value * 6076.12
def to_yards(value):
return value * 2025.37
def to_miles(value):
return value * 1.15078
| 22.916667 | 62 | 0.736364 |
aca20ed6034889ce0409f5bbcdbd01ba21cb6765 | 925 | py | Python | test/test_response_error_type.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | ["RSA-MD"] | 1 | 2019-01-12T18:10:24.000Z | 2019-01-12T18:10:24.000Z | test/test_response_error_type.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | ["RSA-MD"] | null | null | null | test/test_response_error_type.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | ["RSA-MD"] | null | null | null |
# coding: utf-8
"""
AVACloud API 1.17.3
AVACloud API specification # noqa: E501
OpenAPI spec version: 1.17.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import avacloud_client_python
from avacloud_client_python.models.response_error_type import ResponseErrorType # noqa: E501
from avacloud_client_python.rest import ApiException
class TestResponseErrorType(unittest.TestCase):
"""ResponseErrorType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testResponseErrorType(self):
"""Test ResponseErrorType"""
# FIXME: construct object with mandatory attributes with example values
# model = avacloud_client_python.models.response_error_type.ResponseErrorType() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.560976 | 101 | 0.718919 |
78d5b2c099da7fafe635a52676f7d07006d96bf5 | 1,697 | py | Python | lec2-2[p102, LL].py | cutz-j/Statistics-for-ML | ef01d52ab82dc00effbc72a87d75ae41745435e9 | ["MIT"] | null | null | null | lec2-2[p102, LL].py | cutz-j/Statistics-for-ML | ef01d52ab82dc00effbc72a87d75ae41745435e9 | ["MIT"] | null | null | null | lec2-2[p102, LL].py | cutz-j/Statistics-for-ML | ef01d52ab82dc00effbc72a87d75ae41745435e9 | ["MIT"] | null | null | null |
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
wine_quality = pd.read_csv("d:/data/wine/winequality-red.csv", sep=';')
wine_quality.rename(columns=lambda x: x.replace(" ", "_"), inplace=True)
columns = ['fixed_acidity', 'volatile_acidity', 'citric_acid', 'residual_sugar',
'chlorides', 'free_sulfur_dioxide', 'total_sulfur_dioxide', 'density',
'pH','sulphates', 'alcohol']
pdx = wine_quality[columns]
pdy = wine_quality['quality']
x_train, x_test, y_train, y_test = train_test_split(pdx, pdy, train_size=0.7, random_state=77)
alphas = [1e-4, 1e-3, 1e-2, 0.1, 0.5, 1.0, 5.0, 10.0]
initrsq = 0
for alph in alphas:
ridge_reg = Ridge(alpha=alph)
ridge_reg.fit(x_train, y_train)
tr_rsqrd = ridge_reg.score(x_train,y_train)
ts_rsqrd = ridge_reg.score(x_test, y_test)
if ts_rsqrd > initrsq:
print("lambda: ", alph, "train r-sq: ", round(tr_rsqrd, 5), "test r-sq :", round(ts_rsqrd, 5))
initrsq = ts_rsqrd
initrsq = 0
for alph in alphas:
lasso_reg = Lasso(alpha=alph)
lasso_reg.fit(x_train, y_train)
tr_rsqrd = lasso_reg.score(x_train, y_train)
ts_rsqrd = lasso_reg.score(x_test, y_test)
if ts_rsqrd > initrsq:
print("lambda: ", alph, "R-sq ", round(tr_rsqrd, 5), "r-sq ", round(ts_rsqrd, 5))
initrsq = ts_rsqrd
ridge_reg = Ridge(alpha=0.01)
ridge_reg.fit(x_train, y_train)
lasso_reg = Lasso(alpha=0.001)
lasso_reg.fit(x_train, y_train)
for i in range(11):
print(columns[i], "ridge:", ridge_reg.coef_[i])
print(columns[i], "lasso:", lasso_reg.coef_[i])
| 36.106383 | 102 | 0.68297 |
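# Possible follow-up to the script above (an illustrative addition, not part of
# the original lecture code): the fitted estimators can also be used to predict
# wine quality on the held-out split.
ridge_pred = ridge_reg.predict(x_test)   # predictions from the alpha=0.01 ridge fit
lasso_pred = lasso_reg.predict(x_test)   # the lasso penalty can shrink some coefficients exactly to 0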
f8d230c2c1056c3fe82c9c88bad9893c6f73ad22 | 997 | py | Python | plugin.video.2x2/addon.py | raitonoberu/plugin.video.2x2 | b400348a9f02830b4255944a356ab358ffd89148 | ["MIT"] | 1 | 2022-01-22T14:27:54.000Z | 2022-01-22T14:27:54.000Z | plugin.video.2x2/addon.py | raitonoberu/plugin.video.2x2 | b400348a9f02830b4255944a356ab358ffd89148 | ["MIT"] | null | null | null | plugin.video.2x2/addon.py | raitonoberu/plugin.video.2x2 | b400348a9f02830b4255944a356ab358ffd89148 | ["MIT"] | null | null | null |
import xbmcgui
import xbmcplugin
import xbmcaddon
import requests
import sys  # needed for the sys.argv plugin handle used below
api_url = "https://uma.media/api/play/options/dcab9b90a33239837c0f71682d6606da/?format=json"
resolutions = ["1280x720", "1024x576", "640x360"]
addon = xbmcaddon.Addon()
response = requests.get(api_url, headers={"Referer": "https://online.2x2tv.ru/"})
if response:
hls_url = response.json()["live_streams"]["hls"][0]["url"]
playlist = requests.get(hls_url).text
streams = list(filter(lambda i: i.startswith("http"), playlist.split("\n")))
number_of_servers = len(streams) / len(resolutions)
for i in range(len(streams)):
item = xbmcgui.ListItem("2x2 " + resolutions[int(i / number_of_servers)])
xbmcplugin.addDirectoryItem(int(sys.argv[1]), streams[i], item, isFolder=0)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
else:
dialog = xbmcgui.Dialog()
ok = dialog.ok("Error", "{0}: {1}".format(response.status_code, response.json()['detail']['languages'][-1]['title'].encode('utf-8')))
| 41.541667 | 137 | 0.695085 |
759878a70edbf5adbe428450e79879045c3e7c15 | 4,797 | py | Python | saveScreenShot/cropImage.py | guncys-inc/saveSnapShot | 1912fe49ebe945cd9490326c0a803ed479263a92 | ["MIT"] | 8 | 2017-12-04T01:10:44.000Z | 2019-12-22T05:10:06.000Z | saveScreenShot/cropImage.py | guncys-inc/saveSnapShot | 1912fe49ebe945cd9490326c0a803ed479263a92 | ["MIT"] | null | null | null | saveScreenShot/cropImage.py | guncys-inc/saveSnapShot | 1912fe49ebe945cd9490326c0a803ed479263a92 | ["MIT"] | null | null | null |
from .Qt import QtWidgets, QtGui, QtCore
import os
class CropImage(QtWidgets.QDialog):
def __init__(self, imgPath="", outPath="", parent=None):
super(CropImage, self).__init__(parent=parent)
self.__image = None
self.__img_path = None
self.__out_path = None
self.__rect_start = QtCore.QPoint(0, 0)
self.__rect_end = QtCore.QPoint(0, 0)
self.__draw_rect = False
self.__show_desc = True
self.__qt_trns_color = QtGui.QColor(0, 0, 0, 125)
self.__qt_rect_pen = QtGui.QPen(QtCore.Qt.gray)
self.__qt_rect_pen.setStyle(QtCore.Qt.DashLine)
self.__qt_rect_pen.setWidth(3)
self.__qt_white_pen = QtGui.QPen(QtCore.Qt.white)
self.__qt_font = QtGui.QFont("Arial", 16)
self.setImage(imgPath)
self.setOutPath(outPath)
@staticmethod
def RunCropImage(imgPath, outPath, parent=None):
dial = CropImage(imgPath, outPath, parent=parent)
return dial.exec_()
def setOutPath(self, path):
self.__out_path = path
def setImage(self, path):
self.__img_path = path
self.__image = QtGui.QImage(self.__img_path)
self.__resetWidget()
def __resetWidget(self):
size = self.__image.size()
if size.width() < 1 or size.height() < 1:
size = QtCore.QSize(10, 10)
self.setFixedSize(size)
def __saveImage(self):
if self.__out_path:
if self.__draw_rect:
cropped = self.__image.copy(*self.__getDrawRectTuple())
cropped.save(self.__out_path)
else:
self.__image.save(self.__out_path)
def __getDrawRectTuple(self):
i_size = self.__image.size()
iw = i_size.width()
ih = i_size.height()
if self.__rect_start.x() <= self.__rect_end.x():
x1 = self.__rect_start.x()
x2 = self.__rect_end.x()
else:
x1 = self.__rect_end.x()
x2 = self.__rect_start.x()
if self.__rect_start.y() <= self.__rect_end.y():
y1 = self.__rect_start.y()
y2 = self.__rect_end.y()
else:
y1 = self.__rect_end.y()
y2 = self.__rect_start.y()
rx = x1 if x1 >= 0 else 0
rw = (x2 if x2 <= iw else iw) - rx
ry = y1 if y1 >= 0 else 0
rh = (y2 if y2 <= ih else ih) - ry
return (rx, ry, rw, rh)
def keyPressEvent(self, event):
key = event.key()
if key == QtCore.Qt.Key_Escape:
self.reject()
elif key == QtCore.Qt.Key_Return or key == QtCore.Qt.Key_Enter:
self.__saveImage()
self.accept()
def mousePressEvent(self, event):
if event.button() is QtCore.Qt.LeftButton:
self.__draw_rect = True
self.__show_desc = False
self.__rect_start = event.pos()
self.__rect_end = event.pos()
def mouseReleaseEvent(self, event):
if event.button() is QtCore.Qt.LeftButton:
self.__rect_end = event.pos()
if (self.__rect_end - self.__rect_start).manhattanLength() < 10:
self.__draw_rect = False
self.update()
def mouseMoveEvent(self, event):
self.__rect_end = event.pos()
self.update()
def paintEvent(self, event):
super(CropImage, self).paintEvent(event)
if self.__image is not None:
p = QtGui.QPainter(self)
i_size = self.__image.size()
p.drawImage(QtCore.QRect(0, 0, i_size.width(), i_size.height()), self.__image)
iw = i_size.width()
ih = i_size.height()
if self.__show_desc:
p.setPen(self.__qt_rect_pen)
p.setFont(self.__qt_font)
p.fillRect(0, 0, iw, ih, self.__qt_trns_color)
p.drawRect(iw * 0.1, ih * 0.1, iw * 0.8, ih * 0.8)
p.setPen(self.__qt_white_pen)
p.drawText(QtCore.QRect(0, 0, iw, ih), QtCore.Qt.AlignCenter, "Select Capture Area\nPress Enter to save\nPress Escape to cancel")
elif self.__draw_rect:
p.setPen(self.__qt_rect_pen)
rx, ry, rw, rh = self.__getDrawRectTuple()
dx = rx + rw
dy = ry + rh
if rx > 1:
p.fillRect(0, 0, rx, ih, self.__qt_trns_color)
if dx < (iw - 1):
p.fillRect(dx, 0, iw, ih, self.__qt_trns_color)
if ry > 1:
p.fillRect(rx, 0, rw, ry - 1, self.__qt_trns_color)
if dy < (ih - 1):
p.fillRect(rx, dy, rw, ih, self.__qt_trns_color)
p.drawRect(QtCore.QRect(rx, ry, rw, rh))
| 33.082759 | 145 | 0.549719 |
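# Illustrative usage sketch for the dialog above (an addition, not part of the
# saveSnapShot source): RunCropImage() needs a running Qt application, and the
# image paths are hypothetical.
from saveScreenShot import cropImage

app = cropImage.QtWidgets.QApplication([])
result = cropImage.CropImage.RunCropImage("screenshot.png", "cropped.png")  # exec_() return value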
6df00b031e5ab00ef6a2695c9fc4f2a18a91daec | 602 | py | Python | 1010_pairs_of_songs.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | ["MIT"] | 1 | 2020-07-15T14:16:23.000Z | 2020-07-15T14:16:23.000Z | 1010_pairs_of_songs.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | ["MIT"] | null | null | null | 1010_pairs_of_songs.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | ["MIT"] | null | null | null |
#
# 1010. Pairs of Songs With Total Durations Divisible by 60
#
# Q: https://leetcode.com/problems/pairs-of-songs-with-total-durations-divisible-by-60/
# A: https://leetcode.com/problems/pairs-of-songs-with-total-durations-divisible-by-60/discuss/256716/Kt-Js-Py3-Cpp-Map-of-Buckets
#
from typing import List
class Solution:
def numPairsDivisibleBy60(self, A: List[int], pairs = 0) -> int:
m = {}
A = [x % 60 for x in A]
for x in A:
y = (60 - x) % 60
pairs += m[y] if y in m else 0
m[x] = 1 + m[x] if x in m else 1
return pairs
| 31.684211 | 130 | 0.607973 |
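# Worked example for the solution above (an illustrative addition, not part of the
# original repository): durations [30, 20, 150, 100, 40] give the pairs
# (30, 150), (20, 100) and (20, 40), so the expected answer is 3.
assert Solution().numPairsDivisibleBy60([30, 20, 150, 100, 40]) == 3
assert Solution().numPairsDivisibleBy60([60, 60, 60]) == 3   # every pair of the three works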
cc804e89c7de170e48736f7f0141f3abe57e9952 | 12,305 | py | Python | seoperator2_tests.py | 180254/scheduled-events-operator-2 | 9c0720017eac46d97b28e28ac34d63d4768e31dd | ["MIT"] | null | null | null | seoperator2_tests.py | 180254/scheduled-events-operator-2 | 9c0720017eac46d97b28e28ac34d63d4768e31dd | ["MIT"] | null | null | null | seoperator2_tests.py | 180254/scheduled-events-operator-2 | 9c0720017eac46d97b28e28ac34d63d4768e31dd | ["MIT"] | null | null | null |
#!/usr/bin/python3 -u
import unittest
from seoperator2 import ThisHostnames, ScheduledEvent, ProcessingRule, ProcessingRuleProcessor
class ThisHostnamesMock(ThisHostnames):
# noinspection PyMissingConstructor
def __init__(self, compute_name: str, node_name: str) -> None:
self.compute_name: str = compute_name
self.node_name: str = node_name
class ScheduledEventMock(ScheduledEvent):
# noinspection PyMissingConstructor
def __init__(self, eventtype: str, durationinseconds: int) -> None:
self.eventtype: str = eventtype
self.durationinseconds: int = durationinseconds
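# The plain dicts handed to ProcessingRule in the tests below mirror the operator's rule
# configuration: "rule-type" selects handle-event-if / ignore-event-if, "event-type-is"
# filters by event type, and the optional "and-*" keys (node-name pattern, duration
# bounds) add further conditions.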
class ProcessingRuleProcessorGeneralTests(unittest.TestCase):
def test_eventype_not_matched(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"]
})
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Reboot", -1)
)
self.assertEqual(res, True)
def test_stop_on_first_matched_rule(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
"and-node-name-matches": "*-nodepool_1-*"
}),
ProcessingRule({
"rule-type": "handle-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
"and-node-name-matches": "*-nodepool_2-*"
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"]
})
],
ThisHostnamesMock("aks-nodepool_2-36328368-vmss_18",
"aks-nodepool_2-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", -1)
)
self.assertEqual(res, True)
def test_no_rules_matches(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
"and-node-name-matches": "*-nodepool_1-*"
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"],
"and-node-name-matches": "*-nodepool_2-*"
})
],
ThisHostnamesMock("aks-nodepool_3-36328368-vmss_18",
"aks-nodepool_3-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", -1)
)
self.assertEqual(res, True)
def test_no_rules_at_all(self):
res = ProcessingRuleProcessor(
[],
ThisHostnamesMock("aks-nodepool_3-36328368-vmss_18",
"aks-nodepool_3-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 5)
)
self.assertEqual(res, True)
class ProcessingRuleProcessorDurationLessEqualToTests(unittest.TestCase):
def test_ignore_if_duration_less_equal_to_unknown_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-less-equal-to": 10
})
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", -1)
)
self.assertEqual(res, True)
def test_handle_if_duration_less_equal_to_unknown_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "handle-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-less-equal-to": 10
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
}),
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", -1)
)
self.assertEqual(res, False)
def test_ignore_if_duration_less_equal_to_short_duration(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-less-equal-to": 10
})
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 10)
)
self.assertEqual(res, False)
def test_handle_if_duration_less_equal_to_short_duration(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "handle-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-less-equal-to": 10
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
}),
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 10)
)
self.assertEqual(res, True)
def test_ignore_if_duration_less_equal_to_long_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-less-equal-to": 10
})
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 30)
)
self.assertEqual(res, True)
def test_handle_if_duration_less_equal_to_long_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "handle-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-less-equal-to": 10
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
}),
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 30)
)
self.assertEqual(res, False)
class ProcessingRuleProcessorDurationGreaterEqualToTests(unittest.TestCase):
def test_ignore_if_duration_greater_equal_to_unknown_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-greater-equal-to": 10
})
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", -1)
)
self.assertEqual(res, False)
def test_handle_if_duration_greater_equal_to_unknown_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "handle-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-greater-equal-to": 10
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
}),
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", -1)
)
self.assertEqual(res, True)
def test_ignore_if_duration_greater_equal_to_short_duration(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-greater-equal-to": 10
})
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 10)
)
self.assertEqual(res, False)
def test_handle_if_duration_greater_equal_to_short_duration(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "handle-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-greater-equal-to": 10
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
}),
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 10)
)
self.assertEqual(res, True)
def test_ignore_if_duration_greater_equal_to_long_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-greater-equal-to": 10
})
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 30)
)
self.assertEqual(res, False)
def test_handle_if_duration_greater_equal_to_long_duration_case(self):
res = ProcessingRuleProcessor(
[
ProcessingRule({
"rule-type": "handle-event-if",
"event-type-is": ["Freeze"],
"and-duration-in-seconds-greater-equal-to": 10
}),
ProcessingRule({
"rule-type": "ignore-event-if",
"event-type-is": ["Reboot", "Redeploy", "Freeze", "Preempt", "Terminate"],
}),
],
ThisHostnamesMock("aks-default-36328368-vmss_18",
"aks-default-36328368-vmss00000i")
).all_considered_should_handle(
ScheduledEventMock("Freeze", 30)
)
self.assertEqual(res, True)
if __name__ == "__main__":
unittest.main()
| 38.333333 | 94 | 0.520114 |
5897e9e1f2509b93d6122c2bd00543b0d4eabb17 | 2,183 | py | Python | test-framework/test-suites/integration/tests/remove/test_remove_cart.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | test-framework/test-suites/integration/tests/remove/test_remove_cart.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | test-framework/test-suites/integration/tests/remove/test_remove_cart.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | import json
from textwrap import dedent
class TestRemoveCart:
def test_no_args(self, host):
result = host.run('stack remove cart')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "cart" argument is required
{cart ...}
''')
def test_invalid(self, host):
result = host.run('stack remove cart test')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "test" argument is not a valid cart
{cart ...}
''')
def test_single_arg(self, host, add_cart, revert_etc, revert_export_stack_carts):
# Confirm the test cart is there
result = host.run('stack list cart test output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'name': 'test',
'boxes': ''
}
]
# And it has a cart directory
assert host.file('/export/stack/carts/test').is_directory
# Now remove it
result = host.run('stack remove cart test')
assert result.rc == 0
# And confirm it is gone
result = host.run('stack list cart test')
assert result.rc == 255
assert result.stderr.startswith('error - "test" argument is not a valid cart')
# Files should be gone too
assert not host.file('/export/stack/carts/test').exists
def test_multiple_args(self, host, add_cart, revert_etc, revert_export_stack_carts):
# Create a second cart
add_cart('foo')
# Confirm both carts are there
result = host.run('stack list cart test foo output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'name': 'test',
'boxes': ''
},
{
'name': 'foo',
'boxes': ''
}
]
# Now remove our carts
result = host.run('stack remove cart test foo')
assert result.rc == 0
# And confirm they are gone
result = host.run('stack list cart test')
assert result.rc == 255
assert result.stderr.startswith('error - "test" argument is not a valid cart')
result = host.run('stack list cart foo')
assert result.rc == 255
assert result.stderr.startswith('error - "foo" argument is not a valid cart')
# Files should be gone too
assert not host.file('/export/stack/carts/test').exists
assert not host.file('/export/stack/carts/foo').exists
| 26.621951 | 85 | 0.669721 |
5c3ad6276b5642f3386b0bab1ac43a7bd8de55d4 | 430 | py | Python | venv/Scripts/pip3.7-script.py | MarcusJBell/PyGameOfLife | 625c3cc0ca543d5b4403e003ee1d981de90358b4 | [
"Apache-2.0"
] | null | null | null | venv/Scripts/pip3.7-script.py | MarcusJBell/PyGameOfLife | 625c3cc0ca543d5b4403e003ee1d981de90358b4 | [
"Apache-2.0"
] | null | null | null | venv/Scripts/pip3.7-script.py | MarcusJBell/PyGameOfLife | 625c3cc0ca543d5b4403e003ee1d981de90358b4 | [
"Apache-2.0"
] | null | null | null | #!E:\Users\sinti\Documents\Projects\Python\gameoflife\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| 33.076923 | 77 | 0.674419 |
890c6b739c0097be91cd0cbd6ff7dadfff6e9812 | 8,899 | py | Python | test/shed_functional/functional/test_1170_prior_installation_required.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 6 | 2018-11-03T22:43:35.000Z | 2022-02-15T17:51:33.000Z | test/shed_functional/functional/test_1170_prior_installation_required.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | 7 | 2016-12-07T22:19:37.000Z | 2019-01-30T15:04:26.000Z | test/shed_functional/functional/test_1170_prior_installation_required.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | 10 | 2017-04-10T21:40:22.000Z | 2022-02-21T16:50:10.000Z | import logging
from shed_functional.base.twilltestcase import common, ShedTwillTestCase
log = logging.getLogger( __name__ )
column_repository_name = 'column_maker_0150'
column_repository_description = "Add column"
column_repository_long_description = "Compute an expression on every row"
convert_repository_name = 'convert_chars_0150'
convert_repository_description = "Convert delimiters"
convert_repository_long_description = "Convert delimiters to tab"
category_name = 'Test 0150 Simple Prior Installation'
category_description = 'Test 0150 Simple Prior Installation'
'''
Create column_maker and convert_chars.
Column maker repository dependency:
<repository toolshed="self.url" name="convert_chars" owner="test" changeset_revision="c3041382815c" prior_installation_required="True" />
Verify display.
Galaxy side:
Install column_maker.
Verify that convert_chars was installed first, contrary to the ordering that would be present without prior_installation_required.
'''
running_standalone = False
class TestSimplePriorInstallation( ShedTwillTestCase ):
    '''Test features related to repository dependencies that require prior installation.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts."""
self.galaxy_login( email=common.admin_email, username=common.admin_username )
galaxy_admin_user = self.test_db_util.get_galaxy_user( common.admin_email )
assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_galaxy_private_role( galaxy_admin_user )
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
self.test_db_util.get_private_role( test_user_1 )
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_private_role( admin_user )
def test_0005_create_convert_repository( self ):
'''Create and populate convert_chars_0150.'''
global running_standalone
category = self.create_category( name=category_name, description=category_description )
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
repository = self.get_or_create_repository( name=convert_repository_name,
description=convert_repository_description,
long_description=convert_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
running_standalone = True
self.upload_file( repository,
filename='convert_chars/convert_chars.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded convert_chars tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_create_column_repository( self ):
        '''Create and populate column_maker_0150.'''
global running_standalone
category = self.create_category( name=category_name, description=category_description )
repository = self.get_or_create_repository( name=column_repository_name,
description=column_repository_description,
long_description=column_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if running_standalone:
self.upload_file( repository,
filename='column_maker/column_maker.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded column_maker tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_create_repository_dependency( self ):
'''Create a repository dependency specifying convert_chars.'''
'''
Column maker repository dependency:
<repository toolshed="self.url" name="convert_chars" owner="test" changeset_revision="<tip>" prior_installation_required="True" />
'''
global running_standalone
column_repository = self.test_db_util.get_repository_by_name_and_owner( column_repository_name, common.test_user_1_name )
convert_repository = self.test_db_util.get_repository_by_name_and_owner( convert_repository_name, common.test_user_1_name )
if running_standalone:
dependency_xml_path = self.generate_temp_path( 'test_1150', additional_paths=[ 'column' ] )
convert_tuple = ( self.url, convert_repository.name, convert_repository.user.username, self.get_repository_tip( convert_repository ) )
self.create_repository_dependency( repository=column_repository,
repository_tuples=[ convert_tuple ],
filepath=dependency_xml_path,
prior_installation_required=True )
def test_0020_verify_repository_dependency( self ):
        '''Verify that the previously generated repository dependency displays correctly.'''
column_repository = self.test_db_util.get_repository_by_name_and_owner( column_repository_name, common.test_user_1_name )
convert_repository = self.test_db_util.get_repository_by_name_and_owner( convert_repository_name, common.test_user_1_name )
self.check_repository_dependency( repository=column_repository,
depends_on_repository=convert_repository,
depends_on_changeset_revision=None,
changeset_revision=None )
def test_0025_install_column_repository( self ):
'''Install column_maker_0150.'''
self.galaxy_login( email=common.admin_email, username=common.admin_username )
column_repository = self.test_db_util.get_repository_by_name_and_owner( column_repository_name, common.test_user_1_name )
preview_strings_displayed = [ 'column_maker_0150', self.get_repository_tip( column_repository ) ]
strings_displayed = [ 'Choose the tool panel section' ]
self.install_repository( column_repository_name,
common.test_user_1_name,
category_name,
install_tool_dependencies=False,
install_repository_dependencies=True,
preview_strings_displayed=preview_strings_displayed,
strings_displayed=strings_displayed,
strings_not_displayed=[],
post_submit_strings_displayed=[ 'column_maker_0150', 'New' ],
includes_tools_for_display_in_tool_panel=True )
def test_0030_verify_installation_order( self ):
'''Verify that convert_chars_0150 was installed before column_maker_0150.'''
column_repository = self.test_db_util.get_installed_repository_by_name_owner( column_repository_name, common.test_user_1_name )
convert_repository = self.test_db_util.get_installed_repository_by_name_owner( convert_repository_name, common.test_user_1_name )
# Column maker was selected for installation, so convert chars should have been installed first, as reflected by the update_time field.
assert column_repository.update_time > convert_repository.update_time, 'Error: column_maker_0150 shows an earlier update time than convert_chars_0150'
| 60.952055 | 158 | 0.65086 |
a6ae869965e5c72329692f315bb2d4c82229cbc7 | 170 | py | Python | urls_local.py | chrislombaard/cryptopuppet | fcfb6311550ae9846dc116ed36f37406f6aabef1 | [
"MIT"
] | 6 | 2017-06-30T15:52:05.000Z | 2017-12-09T16:43:08.000Z | urls_local.py | webclinic017/cryptopuppet | fcfb6311550ae9846dc116ed36f37406f6aabef1 | [
"MIT"
] | 3 | 2017-06-02T05:54:25.000Z | 2021-05-06T16:02:20.000Z | urls_local.py | webclinic017/cryptopuppet | fcfb6311550ae9846dc116ed36f37406f6aabef1 | [
"MIT"
] | 2 | 2017-07-03T23:44:56.000Z | 2021-11-10T23:36:46.000Z | from project.urls import *
if project.settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r"^django-toolbar/", include(debug_toolbar.urls)),
]
| 21.25 | 62 | 0.670588 |
2851031c5436a10f3436a7c4dcfca907145f6507 | 3,324 | py | Python | exam/migrations/0001_initial.py | Lukmanhakim112/ppdb | c6179478b4c1f0b6cec77a8a059a6e418f6263f1 | [
"MIT"
] | null | null | null | exam/migrations/0001_initial.py | Lukmanhakim112/ppdb | c6179478b4c1f0b6cec77a8a059a6e418f6263f1 | [
"MIT"
] | null | null | null | exam/migrations/0001_initial.py | Lukmanhakim112/ppdb | c6179478b4c1f0b6cec77a8a059a6e418f6263f1 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-09 06:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import exam.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer_text', models.CharField(max_length=120, verbose_name='Jawaban')),
('answer_image', models.ImageField(blank=True, null=True, upload_to=exam.models.answer_directory_path, verbose_name='Gambar Jawaban')),
('is_right', models.BooleanField(default=False, verbose_name='Benar')),
],
),
migrations.CreateModel(
name='Exam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('passcode', models.CharField(max_length=20, verbose_name='Passcode')),
('exam_title', models.CharField(max_length=100, verbose_name='Judul Ujian')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=120, verbose_name='Pertanyaan')),
('question_image', models.ImageField(blank=True, null=True, upload_to=exam.models.question_directory_path, verbose_name='Gambar Pertanyaan')),
('exam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.exam')),
],
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score', models.IntegerField(null=True, verbose_name='Score')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='exam.answer')),
('exam', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='exam.exam')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='exam.question')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.question'),
),
]
| 48.173913 | 158 | 0.619134 |
df54e74d8a8c5800d8c89ef5c053c6de6d7e6674 | 367 | py | Python | django_app/frontend/urls.py | tmunzer/mist_installer_web_ui | cfc1c28084997956fc2ff8c4cc065171cb97f16d | [
"MIT"
] | null | null | null | django_app/frontend/urls.py | tmunzer/mist_installer_web_ui | cfc1c28084997956fc2ff8c4cc065171cb97f16d | [
"MIT"
] | null | null | null | django_app/frontend/urls.py | tmunzer/mist_installer_web_ui | cfc1c28084997956fc2ff8c4cc065171cb97f16d | [
"MIT"
] | 1 | 2021-07-15T17:00:25.000Z | 2021-07-15T17:00:25.000Z | from django.urls import path
from django.conf.urls import handler404
from . import views
urlpatterns = [
path(r'', views.index.as_view(), name='index'),
path(r'login', views.index.as_view(), name='index'),
path(r'select', views.index.as_view(), name='index'),
path(r'dashboard', views.index.as_view(), name='index'),
]
handler404 = views.handler404 | 28.230769 | 60 | 0.689373 |
b405b5aff731daa40d3c037ef3e89f3a7f28f69e | 5,319 | py | Python | src/ekpmeasure/control/moke/main.py | marwahaha/ekpmeasure | 8a0e52f0393849b496d9df13d84fcef11b733f59 | [
"MIT"
] | null | null | null | src/ekpmeasure/control/moke/main.py | marwahaha/ekpmeasure | 8a0e52f0393849b496d9df13d84fcef11b733f59 | [
"MIT"
] | null | null | null | src/ekpmeasure/control/moke/main.py | marwahaha/ekpmeasure | 8a0e52f0393849b496d9df13d84fcef11b733f59 | [
"MIT"
] | null | null | null | from mcculw import ul
from mcculw.enums import ULRange, InfoType, AnalogInputMode
from mcculw.enums import ScanOptions, BoardInfo, TriggerEvent, TrigType, FunctionType
from mcculw.ul import ULError
import ctypes
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from warnings import warn
import os
from ..instruments.USB_1208HS_4AO import *
__all__ = ('RUN',)
class RUN():
def __init__(self, daq):
self.daq = daq.configure()
self.board_num = daq.board_num
self.ul_range = daq.ul_range
return
def load_waveform(self,wvfrm_vstack):
"""load a waveform for daq
----
		wvfrm_vstack: numpy.ndarray (e.g. built with numpy.vstack), one row per output channel
"""
wf_1d, nzeros_front, nzeros_back = waveforms_to_1d_array(wvfrm_vstack)
self.wf_1d = wf_1d
self.nzeros_front = nzeros_front
self.nzeros_back = nzeros_back
self.input_wfm_df = pd.DataFrame({i:wvfrm_vstack[i,:] for i in range(wvfrm_vstack.shape[0])})
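	# Hypothetical usage sketch (values and channel numbers are illustrative, not part of
	# this module; the waveform rows are 12-bit DAC codes centred on 2047, matching the
	# (x - 2047)/2047 scaling used in get_df below):
	#
	#   import numpy as np
	#   wvfrm = np.vstack([2047 + 2047 * np.sin(np.linspace(0, 2 * np.pi, 1000)),
	#                      np.full(1000, 2047)])  # one row per output channel
	#   run = RUN(daq)                            # daq: device wrapper from the USB_1208HS_4AO module
	#   run.load_waveform(wvfrm)
	#   run.config(out_channel_start=0, out_channel_end=1,
	#              in_channel_start=0, in_channel_end=1, nave=10)
	#   run.go()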
def config(self, out_channel_start,out_channel_end,in_channel_start,in_channel_end,nave,quiet = False):
"""configure run
----
out_channel_start: int, specify which start channel to output waveform
out_channel_end: int
in_channel_start: int
in_channel_end: int
"""
self.out_channel_end = out_channel_end
self.out_channel_start = out_channel_start
self.in_channel_end = in_channel_end
self.in_channel_start = in_channel_start
self.nave = nave
self.quiet = quiet
def go(self):
"""start the run"""
to_average = []
		#stop any old background processes, just in case
ul.stop_background(self.board_num, FunctionType.AOFUNCTION)
ul.stop_background(self.board_num, FunctionType.AIFUNCTION)
nchannels_out = self.out_channel_end - self.out_channel_start + 1
for i in range(self.nave):
returned = apply_and_listen(self.wf_1d, self.nzeros_front, self.nzeros_back,
in_channel_start=self.in_channel_start, in_channel_end=self.in_channel_end,
out_channel_start=self.out_channel_start, out_channel_end=self.out_channel_end,
board_num=self.board_num, quiet=self.quiet)
memhandle_in, memhandle_out, data_array_in, data_array_out, count_in, time = returned
# Free the buffer and set the data_array to None
ul.win_buf_free(memhandle_out)
data_array_out = None
#now that it is done convert from data_array back to numpy data:
out = []
for i in range(0, count_in):
out.append(ul.to_eng_units(self.board_num, self.ul_range, data_array_in[i]))
out = np.array(out)
#clear memory
ul.win_buf_free(memhandle_in)
data_array_in = None
#append data
to_average.append(out)
data = np.array(to_average)
means = np.mean(data, axis = 0)
out = waveform_1d_to_array(means, nchannels_in=nchannels_out)
self.waveform_collected = out
self.time = time
return
def plot(self,**kwargs):
"""plot waveform_collected"""
if not hasattr(self, 'time'):
			raise AttributeError('no data has been collected, suggest self.go()')
fig, ax = plt.subplots(**kwargs)
for i in range(self.waveform_collected.shape[0]):
ax.plot(self.time*1e6, self.waveform_collected[i,:])
ax.set_xlabel('time (us)')
return fig, ax
def get_df(self):
"""return pandas dataframe of waveform_collected"""
if not hasattr(self, 'waveform_collected'):
raise AttributeError('no data has been collected, suggest self.go()')
nchannels_in = self.in_channel_end - self.in_channel_start + 1
		#the zero padding is counted in the interleaved 1D buffer, so divide by the number of input channels for the time axis
nzeros_front_for_time = int(self.nzeros_front/nchannels_in)
nzeros_back_for_time = int(self.nzeros_back/nchannels_in)
time = self.time[nzeros_front_for_time:-nzeros_back_for_time]
data = pd.DataFrame({
'time':time,
})
warn("You are getting the input wfm data (which is perfect), not measured current input to the coil")
for i in self.input_wfm_df:
data['AOUT_{}'.format(i)] = 10*(self.input_wfm_df[i]-2047)/2047
for i, x in enumerate(self.waveform_collected):
x_for_data = x[nzeros_front_for_time:-nzeros_back_for_time]
data['AIN_{}'.format(i)] = x_for_data
self.data = data
return
def save(self, path, name):
"""save waveform_collected too file"""
if not hasattr(self, 'data'):
self.get_df(self)
#check if file name exists:
file_set = set(os.listdir(path))
if name in file_set:
yn = input('file already exists. Overwrite? (y/n)')
if yn == 'y':
self.data.to_csv(path+name, index = False)
else:
print('Ok. Skipping.')
else:
self.data.to_csv(path+name, index = False)
return | 35.697987 | 119 | 0.613461 |
a79aac840ee10f7c5cfac57ee7a5954bdb374dcb | 4,718 | py | Python | oxdashboard/settings.py | beta-nu-theta-chi/ox-dashboard | 842d86a381f26159b2c5bad39a95169496832023 | [
"MIT"
] | null | null | null | oxdashboard/settings.py | beta-nu-theta-chi/ox-dashboard | 842d86a381f26159b2c5bad39a95169496832023 | [
"MIT"
] | 70 | 2016-11-16T18:49:02.000Z | 2021-04-26T00:47:18.000Z | oxdashboard/settings.py | beta-nu-theta-chi/ox-dashboard | 842d86a381f26159b2c5bad39a95169496832023 | [
"MIT"
] | null | null | null | """
Django settings for oxdashboard project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import yaml
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if os.environ.get('SECRET_KEY'):
SECRET_KEY = os.environ.get('SECRET_KEY')
else:
SECRET_KEY = "841dp9w*!q+o19&k28!r9-u^sc0mw0vc(qt_x#v*svs^e*_0*!i"
# SECURITY WARNING: don't run with debug turned on in production!
if os.environ.get('DEBUG'):
DEBUG = os.environ.get('DEBUG') != 'False'
else:
DEBUG = True
if os.environ.get('_DEBUG'):
_DEBUG = False
else:
_DEBUG = True
if os.environ.get('DJANGO_ALLOWED_HOSTS'):
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS").split(" ")
else:
ALLOWED_HOSTS = [
'*',
]
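# Example environment for a non-debug deployment (placeholder values; DEBUG is only
# switched off when the variable is literally the string "False"):
#   export SECRET_KEY="change-me"
#   export DEBUG="False"
#   export DJANGO_ALLOWED_HOSTS="example.com www.example.com"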
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dashboard',
'cas',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'cas.middleware.CASMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'oxdashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'oxdashboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if DEBUG and _DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join('/db/db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static-root/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static-root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# EMAIL
EMAIL_HOST = 'smtp.case.edu'
EMAIL_PORT = 25
# CAS
CAS_SERVER_URL = "https://login.case.edu/cas/"
# So after migrating to python 3, the following needed to be commented out
# not really sure why, so I'm leaving this here as a note to future maintainers - jcassarly
# also the new versions of CAS I think require the CAS_AUTO_CREATE_USERS to be true
# CAS_VERSION = '1'
CAS_AUTO_CREATE_USERS = True
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'cas.backends.CASBackend',
)
LOGGING = {
'version': 1,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'WARNING',
},
'cas.backends': {
'handlers': ['console'],
'level': 'WARNING',
},
},
}
THURSDAY_DETAILS = []
SUNDAY_DETAILS = []
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
with open(os.path.join(BASE_DIR, 'oxdashboard/sunday_details.yaml')) as f:
SUNDAY_DETAILS = yaml.full_load(f)
with open(os.path.join(BASE_DIR, 'oxdashboard/thursday_details.yaml')) as f:
THURSDAY_DETAILS = yaml.full_load(f)
| 25.502703 | 90 | 0.664476 |
be085d5c6e4eeda253477cedf60c543cfbf59d09 | 2,411 | py | Python | bulbs/promotion/operations.py | TAPP-TV/django-bulbs | eafe2237702d63d70f668adf45af20245b1b28e9 | [
"MIT"
] | null | null | null | bulbs/promotion/operations.py | TAPP-TV/django-bulbs | eafe2237702d63d70f668adf45af20245b1b28e9 | [
"MIT"
] | null | null | null | bulbs/promotion/operations.py | TAPP-TV/django-bulbs | eafe2237702d63d70f668adf45af20245b1b28e9 | [
"MIT"
] | null | null | null | from django.db import models
from polymorphic import PolymorphicModel
class ContentListOperation(PolymorphicModel):
class Meta:
ordering = ["-when"]
content_list = models.ForeignKey("promotion.ContentList", related_name="operations")
when = models.DateTimeField()
applied = models.BooleanField(default=False)
def apply(self, data):
        raise NotImplementedError()
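    # Shape note, inferred from the apply() implementations below: ``data`` is the
    # serialized content list, i.e. a list of dicts such as
    # [{"id": <content pk>, "lock": <bool>}, ...], and apply() returns the mutated list.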
class InsertOperation(ContentListOperation):
index = models.IntegerField(default=0)
content = models.ForeignKey("content.Content", related_name="+")
lock = models.BooleanField(default=False)
def apply(self, data):
next = {
"id": self.content.pk,
"lock": self.lock
}
for i in range(self.index, min(len(data), 100)):
if data[i].get("lock", False):
continue
next, data[i] = data[i], next # Swap them
data.append(next)
return data
class ReplaceOperation(ContentListOperation):
content = models.ForeignKey("content.Content", related_name="+")
target = models.ForeignKey("content.Content", related_name="+")
lock = models.BooleanField(default=False)
def apply(self, data):
replace = {
"id": self.content.pk,
"lock": self.lock
}
for index, item in enumerate(data):
if item["id"] == self.target.pk:
if item.get("lock", False):
raise Exception("That item is locked!")
data[index] = replace
break
else:
raise Exception("No content in list!")
return data
class LockOperation(ContentListOperation):
target = models.ForeignKey("content.Content", related_name="+")
def apply(self, data):
for index, item in enumerate(data):
if item["id"] == self.target.pk:
data[index]["lock"] = True
break
else:
raise Exception("No content in list!")
return data
class UnlockOperation(ContentListOperation):
target = models.ForeignKey("content.Content", related_name="+")
def apply(self, data):
for index, item in enumerate(data):
if item["id"] == self.target.pk:
data[index]["lock"] = False
break
else:
raise Exception("No content in list!")
return data
| 28.034884 | 88 | 0.585234 |
ecc70fcb366946aae17781b47add46bb9f22dbf9 | 1,814 | py | Python | blog/urls.py | miguelgfierro/sciblog | f43b3b5b514ad5407cb7c73f5f57caea6acb6fc6 | [
"BSD-3-Clause"
] | 110 | 2016-11-25T14:25:10.000Z | 2022-02-16T08:25:57.000Z | blog/urls.py | Pandinosaurus/sciblog | f43b3b5b514ad5407cb7c73f5f57caea6acb6fc6 | [
"BSD-3-Clause"
] | 86 | 2016-11-13T10:04:07.000Z | 2022-03-11T23:14:01.000Z | blog/urls.py | Pandinosaurus/sciblog | f43b3b5b514ad5407cb7c73f5f57caea6acb6fc6 | [
"BSD-3-Clause"
] | 21 | 2016-12-06T15:03:44.000Z | 2021-12-30T11:38:19.000Z | from blog.models import Post
from django.conf import settings
from django.conf.urls import patterns, url
from django.conf.urls.static import static
from django.http import HttpResponse
from django.contrib.sitemaps.views import sitemap
from django.contrib.sites.models import Site
from blog.sitemap import PostSitemap, FlatpageSitemap
from blog.views import (
PostsFeed,
get_search_results,
IndexListView,
PostDetailView,
responsive_flatpage,
)
# Define sitemaps
sitemaps = {"posts": PostSitemap, "pages": FlatpageSitemap}
# Define robots.txt content
current_site = Site.objects.get_current()
robots_content = "User-agent: *\nDisallow: /admin/\nSitemap: https://{}/sitemap.xml".format(
current_site.domain
)
# Define pages
urlpatterns = patterns(
"",
# Index
url(r"^(?P<page>\d+)?/?$", IndexListView.as_view(), name="index",),
# Individual posts
url(
r"^blog/(?P<pub_date__year>\d{4})/(?P<slug>[a-zA-Z0-9-]+)/?$",
PostDetailView.as_view(),
name="post",
),
# Post RSS feed
url(r"^feed/posts/$", PostsFeed()),
# Search posts
url(r"^search", get_search_results, name="search"),
# robots.txt
url(
r"^robots.txt$",
lambda r: HttpResponse(robots_content, content_type="text/plain"),
),
# sitemap
url(
r"^sitemap\.xml$",
sitemap,
{"sitemaps": sitemaps},
name="django.contrib.sitemaps.views.sitemap",
),
)
# Add flat pages
urlpatterns += patterns(
"django.contrib.flatpages.views",
url(r"^about/$", responsive_flatpage, {"url": "/about/"}, name="about"),
url(r"^privacy/$", responsive_flatpage, {"url": "/privacy/"}, name="privacy"),
)
# Debug
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 27.074627 | 92 | 0.664278 |
67ed08d0c69a3a25f48d6a30f6a3e7cf631dac5d | 1,742 | py | Python | sources/python/scripts/tools.py | eddwang/apso-modified | c1a8343930bc72c2d9a979df7972e846d1ac0a91 | [
"Apache-2.0"
] | null | null | null | sources/python/scripts/tools.py | eddwang/apso-modified | c1a8343930bc72c2d9a979df7972e846d1ac0a91 | [
"Apache-2.0"
] | null | null | null | sources/python/scripts/tools.py | eddwang/apso-modified | c1a8343930bc72c2d9a979df7972e846d1ac0a91 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
def console(*args, **kwargs):
'''
Launch the "python interpreter" gui.
Positional arguments are only intended for arguments automatically
passed by the program gui.
Keyword arguments are:
    - 'loc': for passing the caller's locals and/or globals to the console context
    - any constructor constant (BACKGROUND, FOREGROUND...) to tweak the console appearance.
Examples:
- console() # defaut constructor)
- console(loc=locals())
- console(BACKGROUND=0x0, FOREGROUND=0xFFFFFF)
More infos: https://extensions.libreoffice.org/en/extensions/show/apso-alternative-script-organizer-for-python.
'''
# we need to load apso before import statement
ctx = XSCRIPTCONTEXT.getComponentContext()
ctx.ServiceManager.createInstance("apso.python.script.organizer.impl")
# now we can use apso_utils library
from apso_utils import console
from pathlib import Path
from uno import fileUrlToSystemPath
import sys
import os
desktop = XSCRIPTCONTEXT.getDesktop()
doc = desktop.getCurrentComponent()
__file__ = os.path.join(str(Path.home()),"python") if doc.Location == "" else doc.Location
try:
os.chdir(fileUrlToSystemPath(os.path.dirname(__file__)))
except:
os.chdir(os.path.dirname(__file__))
sys.path.append(".")
kwargs.setdefault('loc', {})
kwargs['loc'].setdefault('XSCRIPTCONTEXT', XSCRIPTCONTEXT)
kwargs['loc'].setdefault('__file__', __file__)
kwargs['loc'].setdefault('os', os)
kwargs['loc'].setdefault('sys', sys)
kwargs.setdefault('BACKGROUND',0x0)
kwargs.setdefault('FOREGROUND',0xFFFFFF)
kwargs.setdefault('WIDTH',1000)
console(**kwargs)
g_exportedScripts = console,
| 33.5 | 115 | 0.695752 |
f9e96e1388b20e90ff1642c7b55c2c3509fc90de | 1,557 | py | Python | empower/apps/pingpong/__init__.py | herlesupreeth/5G-Controller | 65a84fc4ad5c04764b12244809c7c4c758d6d352 | [
"Apache-2.0"
] | null | null | null | empower/apps/pingpong/__init__.py | herlesupreeth/5G-Controller | 65a84fc4ad5c04764b12244809c7c4c758d6d352 | [
"Apache-2.0"
] | null | null | null | empower/apps/pingpong/__init__.py | herlesupreeth/5G-Controller | 65a84fc4ad5c04764b12244809c7c4c758d6d352 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2015, Roberto Riggio
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CREATE-NET nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CREATE-NET ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CREATE-NET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ping-pong handover App."""
| 53.689655 | 79 | 0.765575 |
7021485bfdcca96d88059dc00fad92feb68320b5 | 67,338 | py | Python | wflow/wflow/wflow_hbv.py | quanpands/wflow | b454a55e4a63556eaac3fbabd97f8a0b80901e5a | [
"MIT"
] | null | null | null | wflow/wflow/wflow_hbv.py | quanpands/wflow | b454a55e4a63556eaac3fbabd97f8a0b80901e5a | [
"MIT"
] | null | null | null | wflow/wflow/wflow_hbv.py | quanpands/wflow | b454a55e4a63556eaac3fbabd97f8a0b80901e5a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Wflow is Free software, see below:
#
# Copyright (c) J. Schellekens/Deltares 2005-2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TODO: split off routing
"""
Run the wflow_hbv hydrological model..
usage:
wflow_hbv::
[-h][-v level][-F runinfofile][-L logfile][-C casename][-R runId]
[-c configfile][-T timesteps][-s seconds][-W][-E][-N][-U discharge]
[-P parameter multiplication][-X][-l loglevel]
-F: if set wflow is expected to be run by FEWS. It will determine
the timesteps from the runinfo.xml file and save the output initial
conditions to an alternate location. Also set fewsrun=1 in the .ini file!
-f: Force overwrite of existing results
-T: Set end time of the run: yyyy-mm-dd hh:mm:ss
-S: Set start time of the run: yyyy-mm-dd hh:mm:ss
-N: No lateral flow, use runoff response function to generate fast runoff
-s: Set the model timesteps in seconds
-I: re-initialize the initial model conditions with default
-i: Set input table directory (default is intbl)
-x: run for subcatchment only (e.g. -x 1)
-C: set the name of the case (directory) to run
-R: set the name runId within the current case
-L: set the logfile
    -c: name of the wflow configuration file (default: Casename/wflow_hbv.ini).
-h: print usage information
-U: The argument to this option should be a .tss file with measured discharge in
[m^3/s] which the program will use to update the internal state to match
the measured flow. The number of columns in this file should match the
number of gauges in the wflow_gauges.map file.
-u: list of gauges/columns to use in update. Format:
-u [1 , 4 ,13]
        The above example uses columns 1, 4 and 13
-P: set parameter change string (e.g: -P "self.FC = self.FC * 1.6") for non-dynamic variables
-p: set parameter change string (e.g: -P "self.Precipitation = self.Precipitation * 1.11") for
dynamic variables
-l: loglevel (most be one of DEBUG, WARNING, ERROR)
-X overwrites the initial values at the end of each timestep
"""
import os.path
import pcraster.framework
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
wflow = "wflow_hbv"
#: columns used in updating
updateCols = [] #: columns used in updating
""" Column used in updating """
def usage(*args):
"""
Print usage information
- *args: command line arguments given
"""
sys.stdout = sys.stderr
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
class WflowModel(pcraster.framework.DynamicModel):
"""
The user defined model class.
"""
def __init__(self, cloneMap, Dir, RunDir, configfile):
pcraster.framework.DynamicModel.__init__(self)
self.caseName = os.path.abspath(Dir)
self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
pcr.setclone(self.clonemappath)
self.runId = RunDir
self.Dir = os.path.abspath(Dir)
self.configfile = configfile
self.SaveDir = os.path.join(self.Dir, self.runId)
def updateRunOff(self):
"""
Updates the kinematic wave reservoir
"""
self.WaterLevel = (self.Alpha * pow(self.SurfaceRunoff, self.Beta)) / self.Bw
# wetted perimeter (m)
P = self.Bw + (2 * self.WaterLevel)
# Alpha
self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
self.OldKinWaveVolume = self.KinWaveVolume
self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
def stateVariables(self):
"""
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
This function is specific for each model and **must** be present.
        :var self.SurfaceRunoff: Surface runoff in the kin-wave reservoir [m^3/s]
        :var self.WaterLevel: Water level in the kin-wave reservoir [m]
:var self.DrySnow: Snow pack [mm]
:var self.FreeWater: Available free water [mm]
:var self.UpperZoneStorage: Water in the upper zone [mm]
:var self.LowerZoneStorage: Water in the lower zone [mm]
:var self.SoilMoisture: Soil moisture [mm]
:var self.InterceptionStorage: Amount of water on the Canopy [mm]
"""
states = [
"FreeWater",
"SoilMoisture",
"UpperZoneStorage",
"LowerZoneStorage",
"InterceptionStorage",
"SurfaceRunoff",
"WaterLevel",
"DrySnow",
]
if hasattr(self, "ReserVoirSimpleLocs"):
states.append("ReservoirVolume")
if hasattr(self, "ReserVoirComplexLocs"):
states.append("ReservoirWaterLevel")
if hasattr(self, "GlacierFrac"):
states.append("GlacierStore")
return states
# The following are made to better connect to deltashell/openmi
def supplyCurrentTime(self):
"""
gets the current time in seconds after the start of the run
        Output:
- time in seconds since the start of the model run
"""
return self.currentTimeStep() * int(
configget(self.config, "model", "timestepsecs", "86400")
)
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
        If you use this make sure to call wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
modelparameters = []
# Static model parameters e.g.
# modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# Meteo and other forcing
self.P_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
) # timeseries for rainfall
self.PET_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
        )  # timeseries for potential evapotranspiration
self.TEMP_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
        )  # timeseries for temperature
self.Inflow_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
        )  # timeseries for inflow (in/outflow locations, abstractions)
self.Seepage_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Seepage", "/inmaps/SE"
        )  # timeseries for seepage
# Meteo and other forcing
modelparameters.append(
self.ParamType(
name="Precipitation",
stack=self.P_mapstack,
type="timeseries",
default=0.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="PotEvaporation",
stack=self.PET_mapstack,
type="timeseries",
default=0.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="Temperature",
stack=self.TEMP_mapstack,
type="timeseries",
default=10.0,
verbose=True,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="Inflow",
stack=self.Inflow_mapstack,
type="timeseries",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
modelparameters.append(
self.ParamType(
name="Seepage",
stack=self.Seepage_mapstack,
type="timeseries",
default=0.0,
verbose=False,
lookupmaps=[],
)
)
return modelparameters
def suspend(self):
"""
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them
"""
self.logger.info("Saving initial conditions...")
self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(os.path.join(self.SaveDir, "instate"))
if self.fewsrun:
self.logger.info("Saving initial conditions for FEWS...")
self.wf_suspend(os.path.join(self.Dir, "outstate"))
def initial(self):
"""
Initial part of the model, executed only once. Reads all static model
        information (parameters) and sets up the variables used in modelling.
*HBV Soil*
:var FC.tbl: Field Capacity (260.0) [mm]
:var BetaSeepage.tbl: exponent in soil runoff generation equation (1.8) [-]
:var LP.tbl: fraction of Fieldcapacity below which actual evaporation=potential evaporation (0.53000)
:var K4.tbl: Recession constant baseflow (0.02307)
*If SetKquickFlow is set to 1*
:var KQuickFlow.tbl: (0.09880)
:var SUZ.tbl: Level over which K0 is used (100.0)
:var K0.tbl: (0.3)
*If SetKquickFlow is set to 0*
:var KHQ.tbl: recession rate at flow HQ (0.09880)
:var HQ.tbl: high flow rate HQ for which recession rate of upper reservoir is known (3.27000)
:var AlphaNL.tbl: measure of non-linearity of upper reservoir (1.1)
:var PERC.tbl: Percolation from Upper to Lowerzone (0.4000) [mm/day]
:var CFR.tbl: Refreezing efficiency constant in refreezing of freewater in snow (0.05000)
:var Pcorr.tbl: Correction factor for precipitation (1.0)
:var RFCF.tbl: Correction factor for rainfall (1.0)
:var SFCF.tbl: Correction factor for snowfall(1.0)
:var Cflux.tbl: Maximum capillary rise from runoff response routine to soil moisture routine (2.0)
:var ICF.tbl: Maximum interception storage (in forested AND non-forested areas) (2.0)
:var CEVPF.tbl: Correction factor for potential evaporation (1.0)
:var EPF.tbl: Exponent of correction factor for evaporation on days with precipitation(0.0)
:var ECORR.tbl: Evap correction (1.0)
*Snow modelling parameters*
:var TTI.tbl: critical temperature for snowmelt and refreezing (1.000) [oC]
:var TT.tbl: defines interval in which precipitation falls as rainfall and snowfall (-1.41934) [oC]
:var Cfmax.tbl: meltconstant in temperature-index ( 3.75653) [-]
:var WHC.tbl: fraction of Snowvolume that can store water (0.1) [-]
"""
global statistics
global multpars
global updateCols
pcr.setglobaloption("unittrue")
self.thestep = pcr.scalar(0)
self.basetimestep = 86400
        #: files to be used in case of timeseries (scalar) input to the model
#: name of the tss file with precipitation data ("../intss/P.tss")
self.precipTss = "../intss/P.tss"
self.evapTss = (
"../intss/PET.tss"
) #: name of the tss file with potential evap data ("../intss/PET.tss")
self.tempTss = (
"../intss/T.tss"
) #: name of the tss file with temperature data ("../intss/T.tss")
self.inflowTss = (
"../intss/Inflow.tss"
) #: NOT TESTED name of the tss file with inflow data ("../intss/Inflow.tss")
self.SeepageTss = (
"../intss/Seepage.tss"
) #: NOT TESTED name of the tss file with seepage data ("../intss/Seepage.tss")"
self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
# Set and get defaults from ConfigFile here ###################################
self.scalarInput = int(configget(self.config, "model", "ScalarInput", "0"))
self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
self.interpolMethod = configget(
self.config, "model", "InterpolationMethod", "inv"
)
self.reinit = int(configget(self.config, "run", "reinit", "0"))
self.fewsrun = int(configget(self.config, "run", "fewsrun", "0"))
self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
self.updating = int(configget(self.config, "model", "updating", "0"))
self.updateFile = configget(self.config, "model", "updateFile", "no_set")
self.sCatch = int(configget(self.config, "model", "sCatch", "0"))
self.intbl = configget(self.config, "model", "intbl", "intbl")
self.P_style = int(configget(self.config, "model", "P_style", "1"))
self.PET_style = int(configget(self.config, "model", "PET_style", "1"))
self.TEMP_style = int(configget(self.config, "model", "TEMP_style", "1"))
self.modelSnow = int(configget(self.config, "model", "ModelSnow", "1"))
sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
alf = float(configget(self.config, "model", "Alpha", "60"))
Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
self.UpdMaxDist = float(configget(self.config, "model", "UpdMaxDist", "100"))
self.MaxUpdMult = float(configget(self.config, "model", "MaxUpdMult", "1.3"))
self.MinUpdMult = float(configget(self.config, "model", "MinUpdMult", "0.7"))
self.UpFrac = float(configget(self.config, "model", "UpFrac", "0.8"))
self.ExternalQbase = int(configget(self.config, "model", "ExternalQbase", "0"))
self.SetKquickFlow = int(configget(self.config, "model", "SetKquickFlow", "0"))
self.MassWasting = int(configget(self.config, "model", "MassWasting", "0"))
self.SubCatchFlowOnly = int(
configget(self.config, "model", "SubCatchFlowOnly", "0")
)
# static maps to use (normally default)
wflow_subcatch = configget(
self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
)
wflow_dem = configget(
self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
)
wflow_ldd = configget(
self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
)
wflow_river = configget(
self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
)
wflow_riverlength = configget(
self.config,
"model",
"wflow_riverlength",
"staticmaps/wflow_riverlength.map",
)
wflow_riverlength_fact = configget(
self.config,
"model",
"wflow_riverlength_fact",
"staticmaps/wflow_riverlength_fact.map",
)
wflow_landuse = configget(
self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
)
wflow_soil = configget(
self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
)
wflow_gauges = configget(
self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
)
wflow_inflow = configget(
self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
)
wflow_mgauges = configget(
self.config, "model", "wflow_mgauges", "staticmaps/wflow_mgauges.map"
)
wflow_riverwidth = configget(
self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
)
# 2: Input base maps ########################################################
subcatch = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
) # Determines the area of calculations (all cells > 0)
subcatch = pcr.ifthen(subcatch > 0, subcatch)
if self.sCatch > 0:
            subcatch = pcr.ifthen(subcatch == self.sCatch, subcatch)
self.Altitude = self.wf_readmap(
os.path.join(self.Dir, wflow_dem), 0.0, fail=True
) * pcr.scalar(
pcr.defined(subcatch)
) #: The digital elevation map (DEM)
self.TopoLdd = self.wf_readmap(
os.path.join(self.Dir, wflow_ldd), 0.0, fail=True
        ) #: The local drainage definition map (ldd)
self.TopoId = pcr.ordinal(
self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
        ) #: Map defining the area over which the calculations are done (mask)
self.River = pcr.cover(
pcr.boolean(
self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
),
0,
        ) #: river network map. For those cells that belong to a river a specific width is used in the kinematic wave calculations
self.RiverLength = self.wf_readmap(
os.path.join(self.Dir, wflow_riverlength), 0.0
)
# Factor to multiply riverlength with (defaults to 1.0)
self.RiverLengthFac = self.wf_readmap(
os.path.join(self.Dir, wflow_riverlength_fact), 1.0
)
        # read landuse and soilmap and make sure there are no missing points related to the
        # subcatchment map. Currently sets the lu and soil type to 1
self.LandUse = self.wf_readmap(
os.path.join(self.Dir, wflow_landuse), 0.0, fail=True
        ) #: Map with land-use/cover classes
self.LandUse = pcr.cover(self.LandUse, pcr.nominal(pcr.ordinal(subcatch) > 0))
self.Soil = self.wf_readmap(
os.path.join(self.Dir, wflow_soil), 0.0, fail=True
) #: Map with soil classes
self.Soil = pcr.cover(self.Soil, pcr.nominal(pcr.ordinal(subcatch) > 0))
self.OutputLoc = self.wf_readmap(
os.path.join(self.Dir, wflow_gauges), 0.0, fail=True
) #: Map with locations of output gauge(s)
self.InflowLoc = pcr.nominal(
self.wf_readmap(os.path.join(self.Dir, wflow_inflow), 0.0)
) #: Map with location of abstractions/inflows.
self.SeepageLoc = self.wf_readmap(
os.path.join(self.Dir, wflow_inflow), 0.0
        ) #: Seepage from external model (if configured)
RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
# Temperature correction per cell to add
self.TempCor = self.wf_readmap(
os.path.join(
self.Dir,
                configget(
                    self.config,
                    "model",
                    "TemperatureCorrectionMap",
                    "staticmaps/wflow_tempcor.map",
                ),
),
0.0,
)
if self.scalarInput:
self.gaugesMap = self.wf_readmap(
os.path.join(self.Dir, wflow_mgauges), 0.0, fail=True
) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
self.OutputId = self.wf_readmap(
os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True
) # location of subcatchment
self.ZeroMap = 0.0 * pcr.scalar(
pcr.defined(self.Altitude)
) # map with only zero's
# 3: Input time series ###################################################
self.P_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
) # timeseries for rainfall
self.PET_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
        ) # timeseries for potential evapotranspiration ("/inmaps/PET")
self.TEMP_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
        ) # timeseries for temperature ("/inmaps/TEMP")
self.Inflow_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
        ) # timeseries for in/outflow locations (abstractions) ("/inmaps/IF")
self.Seepage_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "Seepage", "/inmaps/SE"
        ) # timeseries for seepage ("/inmaps/SE")
# For in memory override:
self.P = self.ZeroMap
self.PET = self.ZeroMap
self.TEMP = self.ZeroMap
# Set static initial values here #########################################
self.Latitude = pcr.ycoordinate(pcr.boolean(self.Altitude))
self.Longitude = pcr.xcoordinate(pcr.boolean(self.Altitude))
self.logger.info("Linking parameters to landuse, catchment and soil...")
self.Beta = pcr.scalar(0.6) # For sheetflow
# self.M=pcr.lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
self.N = pcr.lookupscalar(
self.Dir + "/" + self.intbl + "/N.tbl", self.LandUse, subcatch, self.Soil
) # Manning overland flow
""" *Parameter:* Manning's N for all non-river cells """
self.NRiver = pcr.lookupscalar(
self.Dir + "/" + self.intbl + "/N_River.tbl",
self.LandUse,
subcatch,
self.Soil,
) # Manning river
""" Manning's N for all cells that are marked as a river """
self.wf_updateparameters()
self.ReserVoirLocs = self.ZeroMap
if hasattr(self, "ReserVoirSimpleLocs"):
# Check if we have simple and or complex reservoirs
tt_simple = pcr.pcr2numpy(self.ReserVoirSimpleLocs, 0.0)
self.nrresSimple = tt_simple.max()
self.ReserVoirLocs = self.ReserVoirLocs + pcr.cover(
pcr.scalar(self.ReserVoirSimpleLocs)
)
else:
self.nrresSimple = 0
if hasattr(self, "ReserVoirComplexLocs"):
tt_complex = pcr.pcr2numpy(self.ReserVoirComplexLocs, 0.0)
self.nrresComplex = tt_complex.max()
self.ReserVoirLocs = self.ReserVoirLocs + pcr.cover(
pcr.scalar(self.ReserVoirComplexLocs)
)
res_area = pcr.cover(pcr.scalar(self.ReservoirComplexAreas), 0.0)
self.filter_P_PET = pcr.ifthenelse(
res_area > 0, res_area * 0.0, res_area * 0.0 + 1.0
)
# read files
self.sh = {}
res_ids = pcr.ifthen(self.ResStorFunc == 2, self.ReserVoirComplexLocs)
np_res_ids = pcr.pcr2numpy(res_ids, 0)
np_res_ids_u = np.unique(np_res_ids[np.nonzero(np_res_ids)])
if np.size(np_res_ids_u) > 0:
for item in np.nditer(np_res_ids_u):
self.sh[int(item)] = np.loadtxt(
self.Dir
+ "/"
+ self.intbl
+ "/Reservoir_SH_"
+ str(item)
+ ".tbl"
)
self.hq = {}
res_ids = pcr.ifthen(self.ResOutflowFunc == 1, self.ReserVoirComplexLocs)
np_res_ids = pcr.pcr2numpy(res_ids, 0)
np_res_ids_u = np.unique(np_res_ids[np.nonzero(np_res_ids)])
if np.size(np_res_ids_u) > 0:
for item in np.nditer(np_res_ids_u):
self.hq[int(item)] = np.loadtxt(
self.Dir
+ "/"
+ self.intbl
+ "/Reservoir_HQ_"
+ str(item)
+ ".tbl",
skiprows=3,
)
else:
self.nrresComplex = 0
if (self.nrresSimple + self.nrresComplex) > 0:
self.ReserVoirLocs = pcr.ordinal(self.ReserVoirLocs)
self.logger.info(
"A total of "
+ str(self.nrresSimple)
+ " simple reservoirs and "
+ str(self.nrresComplex)
+ " complex reservoirs found."
)
self.ReserVoirDownstreamLocs = pcr.downstream(
self.TopoLdd, self.ReserVoirLocs
)
self.TopoLddOrg = self.TopoLdd
self.TopoLdd = pcr.lddrepair(
pcr.cover(
pcr.ifthen(pcr.boolean(self.ReserVoirLocs), pcr.ldd(5)),
self.TopoLdd,
)
)
# HBV Soil params
self.FC = self.readtblDefault(
self.Dir + "/" + self.intbl + "/FC.tbl",
self.LandUse,
subcatch,
self.Soil,
260.0,
)
self.BetaSeepage = self.readtblDefault(
self.Dir + "/" + self.intbl + "/BetaSeepage.tbl",
self.LandUse,
subcatch,
self.Soil,
1.8,
) # exponent in soil runoff generation equation
self.LP = self.readtblDefault(
self.Dir + "/" + self.intbl + "/LP.tbl",
self.LandUse,
subcatch,
self.Soil,
0.53000,
) # fraction of Fieldcapacity below which actual evaporation=potential evaporation (LP)
self.K4 = self.readtblDefault(
self.Dir + "/" + self.intbl + "/K4.tbl",
self.LandUse,
subcatch,
self.Soil,
0.02307,
) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
if self.SetKquickFlow:
self.KQuickFlow = self.readtblDefault(
self.Dir + "/" + self.intbl + "/KQuickFlow.tbl",
self.LandUse,
subcatch,
self.Soil,
0.09880,
) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
self.SUZ = self.readtblDefault(
self.Dir + "/" + self.intbl + "/SUZ.tbl",
self.LandUse,
subcatch,
self.Soil,
100.0,
            ) # Level over which K0 is used
self.K0 = self.readtblDefault(
self.Dir + "/" + self.intbl + "/K0.tbl",
self.LandUse,
subcatch,
self.Soil,
0.3,
) # K0
else:
self.KHQ = self.readtblDefault(
self.Dir + "/" + self.intbl + "/KHQ.tbl",
self.LandUse,
subcatch,
self.Soil,
0.09880,
) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
self.HQ = self.readtblDefault(
self.Dir + "/" + self.intbl + "/HQ.tbl",
self.LandUse,
subcatch,
self.Soil,
3.27000,
) # high flow rate HQ for which recession rate of upper reservoir is known #HQ=3.76;
self.AlphaNL = self.readtblDefault(
self.Dir + "/" + self.intbl + "/AlphaNL.tbl",
self.LandUse,
subcatch,
self.Soil,
1.1,
) # measure of non-linearity of upper reservoir #Alpha=1.6;
self.PERC = self.readtblDefault(
self.Dir + "/" + self.intbl + "/PERC.tbl",
self.LandUse,
subcatch,
self.Soil,
0.4000,
) # percolation from Upper to Lowerzone (mm/day)
self.CFR = self.readtblDefault(
self.Dir + "/" + self.intbl + "/CFR.tbl",
self.LandUse,
subcatch,
self.Soil,
0.05000,
) # refreezing efficiency constant in refreezing of freewater in snow
# self.FoCfmax=self.readtblDefault(self.Dir + "/" + modelEnv['intbl'] + "/FoCfmax.tbl",self.LandUse,subcatch,self.Soil, 0.6000) # correcton factor for snow melt/refreezing in forested and non-forested areas
self.Pcorr = self.readtblDefault(
self.Dir + "/" + self.intbl + "/Pcorr.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
) # correction factor for precipitation
self.RFCF = self.readtblDefault(
self.Dir + "/" + self.intbl + "/RFCF.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
) # correction factor for rainfall
self.SFCF = self.readtblDefault(
self.Dir + "/" + self.intbl + "/SFCF.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
) # correction factor for snowfall
self.Cflux = self.readtblDefault(
self.Dir + "/" + self.intbl + "/Cflux.tbl",
self.LandUse,
subcatch,
self.Soil,
2.0,
) # maximum capillary rise from runoff response routine to soil moisture routine
self.ICF = self.readtblDefault(
self.Dir + "/" + self.intbl + "/ICF.tbl",
self.LandUse,
subcatch,
self.Soil,
2.0,
) # maximum interception storage (in forested AND non-forested areas)
self.CEVPF = self.readtblDefault(
self.Dir + "/" + self.intbl + "/CEVPF.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
        ) # correction factor for potential evaporation (1.15 in forested areas)
self.EPF = self.readtblDefault(
self.Dir + "/" + self.intbl + "/EPF.tbl",
self.LandUse,
subcatch,
self.Soil,
0.0,
) # exponent of correction factor for evaporation on days with precipitation
self.ECORR = self.readtblDefault(
self.Dir + "/" + self.intbl + "/ECORR.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
) # evap correction
# Soil Moisture parameters
self.ECALT = self.ZeroMap + 0.00000 # evaporation lapse per 100m
# self.Ecorr=self.ZeroMap+1 # correction factor for evaporation
# HBV Snow parameters
# critical temperature for snowmelt and refreezing: TTI= 1.000
self.TTI = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TTI.tbl",
self.LandUse,
subcatch,
self.Soil,
1.0,
)
# TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
self.TT = self.readtblDefault(
self.Dir + "/" + self.intbl + "/TT.tbl",
self.LandUse,
subcatch,
self.Soil,
-1.41934,
)
# Cfmax = 3.75653 # meltconstant in temperature-index
self.Cfmax = self.readtblDefault(
self.Dir + "/" + self.intbl + "/Cfmax.tbl",
self.LandUse,
subcatch,
self.Soil,
3.75653,
)
# WHC= 0.10000 # fraction of Snowvolume that can store water
self.WHC = self.readtblDefault(
self.Dir + "/" + self.intbl + "/WHC.tbl",
self.LandUse,
subcatch,
self.Soil,
0.1,
)
# Determine real slope and cell length
self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
self.ZeroMap, sizeinmetres
)
self.Slope = pcr.slope(self.Altitude)
self.Slope = pcr.ifthen(
pcr.boolean(self.TopoId),
pcr.max(0.001, self.Slope * pcr.celllength() / self.reallength),
)
Terrain_angle = pcr.scalar(pcr.atan(self.Slope))
temp = (
pcr.catchmenttotal(pcr.cover(1.0), self.TopoLdd)
* self.reallength
* 0.001
* 0.001
* self.reallength
)
self.QMMConvUp = pcr.cover(self.timestepsecs * 0.001) / temp
# Multiply parameters with a factor (for calibration etc) -P option in command line
self.wf_multparameters()
self.N = pcr.ifthenelse(self.River, self.NRiver, self.N)
# Determine river width from DEM, upstream area and yearly average discharge
        # Scale yearly average Q at outlet with upstream area to get Q over whole catchment
        # Alf ranges from 5 to > 60: 5 for hard rock, large values for sediments
# "Noah J. Finnegan et al 2005 Controls on the channel width of rivers:
# Implications for modeling fluvial incision of bedrock"
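        # In symbols: W = [alf * (alf + 2)**(2/3)]**(3/8) * Q**(3/8) * S**(-3/16) * n**(3/8),
        # where Q is the scaled annual discharge, S the window-averaged slope and n Manning's roughness.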
upstr = pcr.catchmenttotal(1, self.TopoLdd)
Qscale = upstr / pcr.mapmaximum(upstr) * Qmax
W = (
(alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
* Qscale ** (0.375)
* (pcr.max(0.0001, pcr.windowaverage(self.Slope, pcr.celllength() * 4.0)))
** (-0.1875)
* self.N ** (0.375)
)
        # Use supplied riverwidth if possible, else calculate
RiverWidth = pcr.ifthenelse(RiverWidth <= 0.0, W, RiverWidth)
self.SnowWater = self.ZeroMap
# Which columns/gauges to use/ignore in kinematic wave updating
self.UpdateMap = self.ZeroMap
if self.updating:
_tmp = pcr.pcr2numpy(self.OutputLoc, 0.0)
gaugear = _tmp
touse = numpy.zeros(gaugear.shape, dtype="int")
for thecol in updateCols:
idx = (gaugear == thecol).nonzero()
touse[idx] = thecol
self.UpdateMap = pcr.numpy2pcr(pcr.Nominal, touse, 0.0)
            # Calculate distance to updating points (upstream) and use it to scale the correction
            # ldddist returns zero for cells at the gauges so add 1.0 to the result
self.DistToUpdPt = pcr.cover(
pcr.min(
ldddist(self.TopoLdd, pcr.boolean(pcr.cover(self.UpdateMap, 0)), 1)
* self.reallength
/ pcr.celllength(),
self.UpdMaxDist,
),
self.UpdMaxDist,
)
# self.DistToUpdPt = ldddist(self.TopoLdd,pcr.boolean(pcr.cover(self.OutputId,0.0)),1)
# * self.reallength/celllength()
# Initializing of variables
self.logger.info("Initializing of model variables..")
self.TopoLdd = pcr.lddmask(self.TopoLdd, pcr.boolean(self.TopoId))
catchmentcells = pcr.maptotal(pcr.scalar(self.TopoId))
# Limit lateral flow per subcatchment (make pits at all subcatch boundaries)
# This is very handy for Ribasim etc...
if self.SubCatchFlowOnly > 0:
self.logger.info("Creating subcatchment-only drainage network (ldd)")
ds = pcr.downstream(self.TopoLdd, self.TopoId)
usid = pcr.ifthenelse(ds != self.TopoId, self.TopoId, 0)
self.TopoLdd = pcr.lddrepair(
pcr.ifthenelse(pcr.boolean(usid), pcr.ldd(5), self.TopoLdd)
)
        # Used to separate output per LandUse/management classes
# OutZones = self.LandUse
# pcr.report(self.reallength,"rl.map")
# pcr.report(catchmentcells,"kk.map")
self.QMMConv = self.timestepsecs / (
self.reallength * self.reallength * 0.001
) # m3/s --> mm
self.ToCubic = (
self.reallength * self.reallength * 0.001
        ) / self.timestepsecs # mm --> m3/s
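        # Example (assumed values): with a daily timestep (86400 s) and a 1000 m cell,
        # QMMConv = 86400 / (1000 * 1000 * 0.001) = 86.4, so 1 m3/s sustained over one day equals 86.4 mm;
        # ToCubic is simply the inverse conversion.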
self.sumprecip = self.ZeroMap #: accumulated rainfall for water balance
self.sumevap = self.ZeroMap #: accumulated evaporation for water balance
self.sumrunoff = (
self.ZeroMap
        ) #: accumulated runoff for water balance (weighted for upstream area)
self.sumlevel = self.ZeroMap #: accumulated level for water balance
        self.sumpotevap = self.ZeroMap # accumulated potential evaporation for water balance
        self.sumsoilevap = self.ZeroMap
        self.sumtemp = self.ZeroMap # accumulated temperature for water balance
self.ForecQ_qmec = (
self.ZeroMap
        ) # Extra inflow to kinematic wave reservoir for forcing in m^3/sec
self.KinWaveVolume = self.ZeroMap
self.OldKinWaveVolume = self.ZeroMap
self.Qvolume = self.ZeroMap
self.Q = self.ZeroMap
self.suminflow = self.ZeroMap
# cntd
self.FieldCapacity = self.FC #: total water holding capacity of the soil
self.Treshold = (
self.LP * self.FieldCapacity
) # Threshold soilwaterstorage above which AE=PE
# CatSurface=pcr.maptotal(pcr.scalar(pcr.ifthen(pcr.scalar(self.TopoId)>scalar(0.0),pcr.scalar(1.0)))) # catchment surface (in km2)
self.Aspect = pcr.scalar(pcr.aspect(self.Altitude)) # aspect [deg]
self.Aspect = pcr.ifthenelse(self.Aspect <= 0.0, pcr.scalar(0.001), self.Aspect)
# On Flat areas the Aspect function fails, fill in with average...
self.Aspect = pcr.ifthenelse(
pcr.defined(self.Aspect),
self.Aspect,
pcr.areaaverage(self.Aspect, self.TopoId),
)
        # Set DCL to riverlength if that is longer than the basic length calculated from the grid
drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
self.DCL = pcr.max(drainlength, self.RiverLength) # m
        # Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied)
self.DCL = self.DCL * pcr.max(1.0, self.RiverLengthFac)
# water depth (m)
# set width for kinematic wave to cell width for all cells
self.Bw = detdrainwidth(self.TopoLdd, self.xl, self.yl)
# However, in the main river we have real flow so set the width to the
# width of the river
self.Bw = pcr.ifthenelse(self.River, RiverWidth, self.Bw)
# term for Alpha
self.AlpTerm = pow((self.N / (pcr.sqrt(self.Slope))), self.Beta)
# power for Alpha
self.AlpPow = (2.0 / 3.0) * self.Beta
# initial approximation for Alpha
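        # The actual Alpha is set in resume() as AlpTerm * P**AlpPow, with wetted perimeter
        # P = Bw + 2 * WaterLevel, i.e. Alpha = (N * P**(2/3) / sqrt(Slope))**Beta from Manning's equation.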
# calculate catchmentsize
self.upsize = pcr.catchmenttotal(self.xl * self.yl, self.TopoLdd)
self.csize = pcr.areamaximum(self.upsize, self.TopoId)
self.logger.info("End of initial section.")
def default_summarymaps(self):
"""
Returns a list of default summary-maps at the end of a run.
        This is model specific. You can also add them to the [summary] section of the ini file,
        but anything you think is crucial to the model should be listed here.
"""
lst = [
"self.Cfmax",
"self.csize",
"self.upsize",
"self.TTI",
"self.TT",
"self.WHC",
"self.Slope",
"self.N",
"self.xl",
"self.yl",
"self.reallength",
"self.DCL",
"self.Bw",
]
return lst
def resume(self):
""" read initial state maps (they are output of a previous call to suspend()) """
if self.reinit == 1:
self.logger.info("Setting initial conditions to default (zero!)")
self.FreeWater = pcr.cover(0.0) #: Water on surface (state variable [mm])
self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
self.UpperZoneStorage = (
0.2 * self.FC
) #: Storage in Upper Zone (state variable [mm])
self.LowerZoneStorage = 1.0 / (
3.0 * self.K4
            ) #: Storage in Lower Zone (state variable [mm])
self.InterceptionStorage = pcr.cover(
0.0
) #: Interception Storage (state variable [mm])
self.SurfaceRunoff = pcr.cover(
0.0
            ) #: Discharge in kinematic wave (state variable [m^3/s])
self.WaterLevel = pcr.cover(
0.0
            ) #: Water level in kinematic wave (state variable [m])
self.DrySnow = pcr.cover(0.0) #: Snow amount (state variable [mm])
if hasattr(self, "ReserVoirSimpleLocs"):
self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
if hasattr(self, "ReserVoirComplexLocs"):
self.ReservoirWaterLevel = pcr.cover(0.0)
if hasattr(self, "GlacierFrac"):
self.GlacierStore = self.wf_readmap(
os.path.join(self.Dir, "staticmaps", "GlacierStore.map"),
55.0 * 1000,
)
else:
self.wf_resume(os.path.join(self.Dir, "instate"))
P = self.Bw + (2.0 * self.WaterLevel)
self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
self.OldSurfaceRunoff = self.SurfaceRunoff
self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv
# Determine initial kinematic wave volume
self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
self.OldKinWaveVolume = self.KinWaveVolume
self.initstorage = (
self.FreeWater
+ self.DrySnow
+ self.SoilMoisture
+ self.UpperZoneStorage
+ self.LowerZoneStorage
+ self.InterceptionStorage
)
if not self.SetKquickFlow:
self.KQuickFlow = (self.KHQ ** (1.0 + self.AlphaNL)) * (
self.HQ ** -self.AlphaNL
            ) # recession rate of the upper reservoir: KHQ*UHQ = HQ = KQuickFlow*UHQ**(1.0+AlphaNL)
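            # Derivation: at the reference storage UHQ = HQ/KHQ both the linear and the non-linear
            # reservoir give the same outflow HQ, so KQuickFlow = KHQ**(1 + AlphaNL) * HQ**(-AlphaNL).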
def dynamic(self):
"""
Below a list of variables that can be save to disk as maps or as
timeseries (see ini file for syntax):
*Dynamic variables*
:var self.SurfaceRunoff: Surface runoff in the kinematic wave [m^3/s]
:var self.WaterLevel: Water level in the kinematic wave [m] (above the bottom)
:var self.InterceptionStorage: actual interception storage [mm]
:var self.Snow: Snow depth [mm]
:var self.SnowWater: water content of the snow [mm]
:var self.LowerZoneStorage: water content of the lower zone [mm]
:var self.UpperZoneStorage: water content of the Upper zone [mm]
:var self.InUpperZone: water inflow into Upper zone [mm]
:var self.HBVSeepage: recharge to Upper zone [mm]
:var self.DirectRunoff: direct runoff to Upper Zone [mm]
:var self.BaseFlow: Specific runoff (baseflow part) per cell [mm]
:var self.Percolation: actual percolation to the lower zone [mm]
:var self.SoilMoisture: actual soil moisture [mm]
        :var self.QuickFlow: specific runoff (quickflow part) [mm]
:var self.RealQuickFlow: specific runoff (quickflow), If K upper zone is precalculated [mm]
        :var self.CapFlux: capillary rise [mm]
:var self.SurfaceRunoffMM: SurfaceRunoff in mm
:var self.KinWaveVolume: Volume in the kinematic wave reservoir
:var self.SurfaceWaterSupply: the negative Inflow (water demand) that could be met from the surfacewater [m^3/s]
*Static variables*
:var self.Altitude: The altitude of each cell [m]
:var self.Bw: Width of the river [m]
        :var self.River: boolean map indicating the presence of a river [-]
        :var self.DCL: length of the river within a cell [m]
        :var self.ToCubic: Multiplier to convert mm to m^3/s for fluxes
"""
        self.wf_updateparameters() # read forcing and dynamic parameters
self.Precipitation = pcr.max(0.0, self.Precipitation) * self.Pcorr
# self.Precipitation=pcr.cover(self.wf_readmap(self.P_mapstack,0.0),0.0) * self.Pcorr
# self.PotEvaporation=pcr.cover(self.wf_readmap(self.PET_mapstack,0.0),0.0)
# self.Inflow=pcr.cover(self.wf_readmap(self.Inflow_mapstack,0.0,verbose=False),0.0)
        # These are ALWAYS 0 at present!!!
# self.Inflow=pcrut.readmapSave(self.Inflow_mapstack,0.0)
if self.ExternalQbase:
self.Seepage = pcr.cover(self.wf_readmap(self.Seepage_mapstack, 0.0), 0.0)
else:
self.Seepage = pcr.cover(0.0)
self.Temperature = pcr.cover(self.wf_readmap(self.TEMP_mapstack, 10.0), 10.0)
self.Temperature = self.Temperature + self.TempCor
        # Multiply input parameters with a factor (for calibration etc) -p option in command line (now also in ini)
self.wf_multparameters()
RainFrac = pcr.ifthenelse(
1.0 * self.TTI == 0.0,
pcr.ifthenelse(
self.Temperature <= self.TT, pcr.scalar(0.0), pcr.scalar(1.0)
),
pcr.min(
(self.Temperature - (self.TT - self.TTI / 2.0)) / self.TTI,
pcr.scalar(1.0),
),
)
RainFrac = pcr.max(
RainFrac, pcr.scalar(0.0)
) # fraction of precipitation which falls as rain
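        # RainFrac ramps linearly from 0 at T = TT - TTI/2 to 1 at T = TT + TTI/2;
        # with TTI = 0 it is a hard switch at TT.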
SnowFrac = 1.0 - RainFrac # fraction of self.Precipitation which falls as snow
self.Precipitation = (
self.SFCF * SnowFrac * self.Precipitation
+ self.RFCF * RainFrac * self.Precipitation
) # different correction for rainfall and snowfall
# Water onto the canopy
Interception = pcr.min(
self.Precipitation, self.ICF - self.InterceptionStorage
) #: Interception in mm/timestep
self.InterceptionStorage = (
self.InterceptionStorage + Interception
) #: Current interception storage
self.Precipitation = self.Precipitation - Interception
self.PotEvaporation = (
pcr.exp(-self.EPF * self.Precipitation) * self.ECORR * self.PotEvaporation
) # correction for potential evaporation on wet days
self.PotEvaporation = self.CEVPF * self.PotEvaporation # Correct per landuse
self.IntEvap = pcr.min(
self.InterceptionStorage, self.PotEvaporation
) #: Evaporation from interception storage
self.InterceptionStorage = self.InterceptionStorage - self.IntEvap
        # In the original HBV code
RestEvap = pcr.max(0.0, self.PotEvaporation - self.IntEvap)
if hasattr(self, "ReserVoirComplexLocs"):
self.ReserVoirPotEvap = self.PotEvaporation
self.ReserVoirPrecip = self.Precipitation
self.PotEvaporation = self.filter_P_PET * self.PotEvaporation
self.Precipitation = self.filter_P_PET * self.Precipitation
SnowFall = SnowFrac * self.Precipitation #: snowfall depth
RainFall = RainFrac * self.Precipitation #: rainfall depth
PotSnowMelt = pcr.ifthenelse(
self.Temperature > self.TT,
self.Cfmax * (self.Temperature - self.TT),
pcr.scalar(0.0),
) # Potential snow melt, based on temperature
PotRefreezing = pcr.ifthenelse(
self.Temperature < self.TT,
self.Cfmax * self.CFR * (self.TT - self.Temperature),
0.0,
) # Potential refreezing, based on temperature
Refreezing = pcr.ifthenelse(
self.Temperature < self.TT, pcr.min(PotRefreezing, self.FreeWater), 0.0
) # actual refreezing
self.SnowMelt = pcr.min(PotSnowMelt, self.DrySnow) # actual snow melt
self.DrySnow = (
self.DrySnow + SnowFall + Refreezing - self.SnowMelt
) # dry snow content
self.FreeWater = self.FreeWater - Refreezing # free water content in snow
MaxFreeWater = self.DrySnow * self.WHC
self.FreeWater = self.FreeWater + self.SnowMelt + RainFall
InSoil = pcr.max(
self.FreeWater - MaxFreeWater, 0.0
) # abundant water in snow pack which goes into soil
self.FreeWater = self.FreeWater - InSoil
RainAndSnowmelt = RainFall + self.SnowMelt
self.SnowCover = pcr.ifthenelse(self.DrySnow > 0, pcr.scalar(1), pcr.scalar(0))
self.NrCell = pcr.areatotal(self.SnowCover, self.TopoId)
# first part of precipitation is intercepted
# Interception=pcr.min(InSoil,self.ICF-self.InterceptionStorage)#: Interception in mm/timestep
# self.InterceptionStorage=self.InterceptionStorage+Interception #: Current interception storage
# NetInSoil=InSoil-Interception
NetInSoil = InSoil
self.SoilMoisture = self.SoilMoisture + NetInSoil
DirectRunoff = pcr.max(
self.SoilMoisture - self.FieldCapacity, 0.0
        ) # if soil is filled to capacity: abundant water runs off directly
self.SoilMoisture = self.SoilMoisture - DirectRunoff
NetInSoil = NetInSoil - DirectRunoff # net water which infiltrates into soil
MaxSnowPack = 10000.0
if self.MassWasting:
# Masswasting of snow
            # 5.67 = tan of 80 degrees
SnowFluxFrac = pcr.min(0.5, self.Slope / 5.67) * pcr.min(
1.0, self.DrySnow / MaxSnowPack
)
MaxFlux = SnowFluxFrac * self.DrySnow
self.DrySnow = accucapacitystate(self.TopoLdd, self.DrySnow, MaxFlux)
self.FreeWater = accucapacitystate(
self.TopoLdd, self.FreeWater, SnowFluxFrac * self.FreeWater
)
else:
SnowFluxFrac = self.ZeroMap
MaxFlux = self.ZeroMap
if hasattr(self, "GlacierFrac"):
"""
Run Glacier module and add the snowpack on-top of it.
Estimate the fraction of snow turned into ice (HBV-light).
Estimate glacier melt.
glacierHBV function in wflow_lib.py
"""
self.DrySnow, self.Snow2Glacier, self.GlacierStore, self.GlacierMelt = glacierHBV(
self.GlacierFrac,
self.GlacierStore,
self.DrySnow,
self.Temperature,
self.G_TT,
self.G_Cfmax,
self.G_SIfrac,
self.timestepsecs,
self.basetimestep
)
# Convert to mm per grid cell and add to snowmelt
self.GlacierMelt = self.GlacierMelt * self.GlacierFrac
self.FreeWater = (
self.FreeWater + self.GlacierMelt
)
# IntEvap=pcr.min(self.InterceptionStorage,self.PotEvaporation) #: Evaporation from interception storage
# self.InterceptionStorage=self.InterceptionStorage-IntEvap
        # In the original HBV code
# RestEvap = pcr.max(0.0,self.PotEvaporation-IntEvap)
self.SoilEvap = pcr.ifthenelse(
self.SoilMoisture > self.Treshold,
pcr.min(self.SoilMoisture, RestEvap),
pcr.min(
self.SoilMoisture,
pcr.min(
RestEvap, self.PotEvaporation * (self.SoilMoisture / self.Treshold)
),
),
)
#: soil evapotranspiration
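        # Above the threshold (LP * FieldCapacity) actual soil evaporation equals the remaining
        # potential evaporation; below it, it is reduced linearly with SoilMoisture/Treshold and
        # never exceeds the available SoilMoisture.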
self.SoilMoisture = (
self.SoilMoisture - self.SoilEvap
) # evaporation from soil moisture storage
self.ActEvap = (
self.IntEvap + self.SoilEvap
) #: Sum of evaporation components (IntEvap+SoilEvap)
self.HBVSeepage = (
(pcr.min(self.SoilMoisture / self.FieldCapacity, 1)) ** self.BetaSeepage
) * NetInSoil # runoff water from soil
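        # Classic HBV recharge: the fraction (SoilMoisture/FieldCapacity)**BetaSeepage (capped at 1)
        # of the net soil input is routed to the upper zone.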
self.SoilMoisture = self.SoilMoisture - self.HBVSeepage
Backtosoil = pcr.min(
self.FieldCapacity - self.SoilMoisture, DirectRunoff
) # correction for extremely wet periods: soil is filled to capacity
self.DirectRunoff = DirectRunoff - Backtosoil
self.SoilMoisture = self.SoilMoisture + Backtosoil
self.InUpperZone = (
self.DirectRunoff + self.HBVSeepage
) # total water available for runoff
# Steps is always 1 at the moment
# calculations for Upper zone
self.UpperZoneStorage = (
self.UpperZoneStorage + self.InUpperZone
) # incoming water from soil
self.Percolation = pcr.min(
self.PERC, self.UpperZoneStorage - self.InUpperZone / 2
) # Percolation
self.UpperZoneStorage = self.UpperZoneStorage - self.Percolation
self.CapFlux = self.Cflux * (
((self.FieldCapacity - self.SoilMoisture) / self.FieldCapacity)
) #: Capillary flux flowing back to soil
self.CapFlux = pcr.min(self.UpperZoneStorage, self.CapFlux)
self.CapFlux = pcr.min(self.FieldCapacity - self.SoilMoisture, self.CapFlux)
self.UpperZoneStorage = self.UpperZoneStorage - self.CapFlux
self.SoilMoisture = self.SoilMoisture + self.CapFlux
if not self.SetKquickFlow:
self.QuickFlow = pcr.min(
pcr.ifthenelse(
self.Percolation < self.PERC,
0,
self.KQuickFlow
* (
(
self.UpperZoneStorage
- pcr.min(self.InUpperZone / 2, self.UpperZoneStorage)
)
** (1.0 + self.AlphaNL)
),
),
self.UpperZoneStorage,
)
self.UpperZoneStorage = pcr.max(
pcr.ifthenelse(
self.Percolation < self.PERC,
self.UpperZoneStorage,
self.UpperZoneStorage - self.QuickFlow,
),
0,
)
# QuickFlow_temp = pcr.max(0,self.KQuickFlow*(self.UpperZoneStorage**(1.0+self.AlphaNL)))
# self.QuickFlow = pcr.min(QuickFlow_temp,self.UpperZoneStorage)
self.RealQuickFlow = self.ZeroMap
else:
self.QuickFlow = self.KQuickFlow * self.UpperZoneStorage
self.RealQuickFlow = pcr.max(
0, self.K0 * (self.UpperZoneStorage - self.SUZ)
)
self.UpperZoneStorage = (
self.UpperZoneStorage - self.QuickFlow - self.RealQuickFlow
)
"""Quickflow volume in mm/timestep"""
# self.UpperZoneStorage=self.UpperZoneStorage-self.QuickFlow-self.RealQuickFlow
# calculations for Lower zone
self.LowerZoneStorage = self.LowerZoneStorage + self.Percolation
self.BaseFlow = pcr.min(
self.LowerZoneStorage, self.K4 * self.LowerZoneStorage
) #: Baseflow in mm/timestep
self.LowerZoneStorage = self.LowerZoneStorage - self.BaseFlow
# Direct runoff generation
if self.ExternalQbase:
DirectRunoffStorage = self.QuickFlow + self.Seepage + self.RealQuickFlow
else:
DirectRunoffStorage = self.QuickFlow + self.BaseFlow + self.RealQuickFlow
self.InSoil = InSoil
self.RainAndSnowmelt = RainAndSnowmelt
self.NetInSoil = NetInSoil
self.InwaterMM = pcr.max(0.0, DirectRunoffStorage)
self.Inwater = self.InwaterMM * self.ToCubic
# only run the reservoir module if needed
if self.nrresSimple > 0:
self.ReservoirVolume, self.Outflow, self.ResPercFull, self.DemandRelease = simplereservoir(
self.ReservoirVolume,
self.SurfaceRunoff,
self.ResMaxVolume,
self.ResTargetFullFrac,
self.ResMaxRelease,
self.ResDemand,
self.ResTargetMinFrac,
self.ReserVoirSimpleLocs,
timestepsecs=self.timestepsecs,
)
self.OutflowDwn = pcr.upstream(
self.TopoLddOrg, pcr.cover(self.Outflow, pcr.scalar(0.0))
)
self.Inflow = self.OutflowDwn + pcr.cover(self.Inflow, self.ZeroMap)
# else:
# self.Inflow= pcr.cover(self.Inflow,self.ZeroMap)
elif self.nrresComplex > 0:
self.ReservoirWaterLevel, self.Outflow, self.ReservoirPrecipitation, self.ReservoirEvaporation, self.ReservoirVolume = complexreservoir(
self.ReservoirWaterLevel,
self.ReserVoirComplexLocs,
self.LinkedReservoirLocs,
self.ResArea,
self.ResThreshold,
self.ResStorFunc,
self.ResOutflowFunc,
self.sh,
self.hq,
self.Res_b,
self.Res_e,
self.SurfaceRunoff,
self.ReserVoirPrecip,
self.ReserVoirPotEvap,
self.ReservoirComplexAreas,
self.wf_supplyJulianDOY(),
timestepsecs=self.timestepsecs,
)
self.OutflowDwn = pcr.upstream(
self.TopoLddOrg, pcr.cover(self.Outflow, pcr.scalar(0.0))
)
self.Inflow = self.OutflowDwn + pcr.cover(self.Inflow, self.ZeroMap)
else:
self.Inflow = pcr.cover(self.Inflow, self.ZeroMap)
self.QuickFlowCubic = (self.QuickFlow + self.RealQuickFlow) * self.ToCubic
self.BaseFlowCubic = self.BaseFlow * self.ToCubic
self.SurfaceWaterSupply = pcr.ifthenelse(
self.Inflow < 0.0,
pcr.max(-1.0 * self.Inwater, self.SurfaceRunoff),
self.ZeroMap,
)
self.Inwater = self.Inwater + pcr.ifthenelse(
self.SurfaceWaterSupply > 0, -1.0 * self.SurfaceWaterSupply, self.Inflow
)
##########################################################################
# Runoff calculation via Kinematic wave ##################################
##########################################################################
# per distance along stream
q = self.Inwater / self.DCL + self.ForecQ_qmec / self.DCL
self.OldSurfaceRunoff = self.SurfaceRunoff
self.SurfaceRunoff = pcr.kinematic(
self.TopoLdd,
self.SurfaceRunoff,
q,
self.Alpha,
self.Beta,
self.Tslice,
self.timestepsecs,
self.DCL,
) # m3/s
self.SurfaceRunoffMM = (
self.SurfaceRunoff * self.QMMConv
) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
InflowKinWaveCell = pcr.upstream(self.TopoLdd, self.SurfaceRunoff)
self.MassBalKinWave = (
(self.KinWaveVolume - self.OldKinWaveVolume) / self.timestepsecs
+ InflowKinWaveCell
+ self.Inwater
- self.SurfaceRunoff
)
Runoff = self.SurfaceRunoff
# Updating
# --------
        # Assume a tss file with as many columns as output locations. Start updating for each non-missing value and start with the
# first column (nr 1). Assumes that outputloc and columns match!
if self.updating:
QM = pcr.timeinputscalar(self.updateFile, self.UpdateMap) * self.QMMConv
# Now update the state. Just add to the Ustore
# self.UStoreDepth = result
            # Now determine multiplication ratio for each gauge influence area.
# For missing gauges 1.0 is assumed (no change).
# UpDiff = pcr.areamaximum(QM, self.UpdateMap) - pcr.areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
UpRatio = pcr.areamaximum(QM, self.UpdateMap) / pcr.areamaximum(
self.SurfaceRunoffMM, self.UpdateMap
)
UpRatio = pcr.cover(pcr.areaaverage(UpRatio, self.TopoId), 1.0)
# Now split between Soil and Kyn wave
self.UpRatioKyn = pcr.min(
self.MaxUpdMult,
pcr.max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0),
)
UpRatioSoil = pcr.min(
self.MaxUpdMult,
pcr.max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0),
)
# update/nudge self.UStoreDepth for the whole upstream area,
# not sure how much this helps or worsens things
UpdSoil = True
if UpdSoil:
toadd = (self.UpperZoneStorage * UpRatioSoil) - self.UpperZoneStorage
self.UpperZoneStorage = self.UpperZoneStorage + toadd
# Update the kinematic wave reservoir up to a maximum upstream distance
# TODO: add (much smaller) downstream updating also?
MM = (1.0 - self.UpRatioKyn) / self.UpdMaxDist
self.UpRatioKyn = MM * self.DistToUpdPt + self.UpRatioKyn
self.SurfaceRunoff = self.SurfaceRunoff * self.UpRatioKyn
self.SurfaceRunoffMM = (
self.SurfaceRunoff * self.QMMConv
) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
Runoff = self.SurfaceRunoff
self.QCatchmentMM = self.SurfaceRunoff * self.QMMConvUp
# self.RunoffCoeff = self.QCatchmentMM/catchmenttotal(self.Precipitation, self.TopoLdd)/catchmenttotal(pcr.cover(1.0), self.TopoLdd)
self.sumprecip = (
self.sumprecip + self.Precipitation
) # accumulated rainfall for water balance
self.sumevap = (
self.sumevap + self.ActEvap
) # accumulated evaporation for water balance
self.sumsoilevap = self.sumsoilevap + self.SoilEvap
self.sumpotevap = self.sumpotevap + self.PotEvaporation
self.sumtemp = self.sumtemp + self.Temperature
self.sumrunoff = (
self.sumrunoff + self.InwaterMM
) # accumulated Cell runoff for water balance
self.sumlevel = self.sumlevel + self.WaterLevel
self.suminflow = self.suminflow + self.Inflow
self.storage = (
self.FreeWater
+ self.DrySnow
+ self.SoilMoisture
+ self.UpperZoneStorage
+ self.LowerZoneStorage
)
# + self.InterceptionStorage
self.watbal = (
(self.initstorage - self.storage)
+ self.sumprecip
- self.sumsoilevap
- self.sumrunoff
)
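        # Water-balance check: storage change plus accumulated fluxes; this should remain close to
        # zero when mass is conserved (interception storage is excluded from self.storage above).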
# The main function is used to run the program from the command line
def main(argv=None):
"""
Perform command line execution of the model.
"""
global multpars
global updateCols
caseName = "default_hbv"
runId = "run_default"
configfile = "wflow_hbv.ini"
LogFileName = "wflow.log"
_lastTimeStep = 0
_firstTimeStep = 0
fewsrun = False
runinfoFile = "runinfo.xml"
timestepsecs = 86400
wflow_cloneMap = "wflow_subcatch.map"
NoOverWrite = 1
loglevel = logging.DEBUG
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
return
## Main model starts here
########################################################################
try:
opts, args = getopt.getopt(argv, "c:QXS:F:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:")
except getopt.error as msg:
pcrut.usage(msg)
for o, a in opts:
if o == "-F":
runinfoFile = a
fewsrun = True
if o == "-C":
caseName = a
if o == "-R":
runId = a
if o == "-L":
LogFileName = a
if o == "-l":
            loglevel = getattr(logging, a)
if o == "-c":
configfile = a
if o == "-s":
timestepsecs = int(a)
if o == "-h":
usage()
if o == "-f":
NoOverWrite = 0
if fewsrun:
ts = getTimeStepsfromRuninfo(runinfoFile, timestepsecs)
starttime = getStartTimefromRuninfo(runinfoFile)
if ts:
_lastTimeStep = ts # * 86400/timestepsecs
_firstTimeStep = 1
else:
print("Failed to get timesteps from runinfo file: " + runinfoFile)
sys.exit(2)
else:
starttime = dt.datetime(1990, 1, 1)
if _lastTimeStep < _firstTimeStep:
        print(
            "The last timestep ("
            + str(_lastTimeStep)
            + ") is smaller than the first timestep ("
            + str(_firstTimeStep)
            + ")"
        )
usage()
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
dynModelFw = wf_DynamicFramework(
myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
)
dynModelFw.createRunId(
NoOverWrite=NoOverWrite,
logfname=LogFileName,
level=loglevel,
doSetupFramework=False,
)
for o, a in opts:
if o == "-P":
left = a.split("=")[0]
right = a.split("=")[1]
configset(
myModel.config, "variable_change_once", left, right, overwrite=True
)
if o == "-p":
left = a.split("=")[0]
right = a.split("=")[1]
configset(
myModel.config, "variable_change_timestep", left, right, overwrite=True
)
if o == "-X":
configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
if o == "-I":
configset(myModel.config, "run", "reinit", "1", overwrite=True)
if o == "-i":
configset(myModel.config, "model", "intbl", a, overwrite=True)
if o == "-s":
configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
if o == "-x":
configset(myModel.config, "model", "sCatch", a, overwrite=True)
if o == "-c":
configset(myModel.config, "model", "configfile", a, overwrite=True)
if o == "-M":
configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
if o == "-Q":
configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
if o == "-U":
configset(myModel.config, "model", "updateFile", a, overwrite=True)
configset(myModel.config, "model", "updating", "1", overwrite=True)
if o == "-u":
            updateCols = eval(a)  # "-u" expects a Python list expression, e.g. "[1, 2]"
if o == "-T":
configset(myModel.config, "run", "endtime", a, overwrite=True)
if o == "-S":
configset(myModel.config, "run", "starttime", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw.logger.info("Command line: " + str(argv))
dynModelFw._runInitial()
dynModelFw._runResume()
# dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
os.chdir("../../")
if __name__ == "__main__":
main()
| 39.939502 | 215 | 0.576435 |
5a43206562b9b74bd7a66db1792484962fd94473 | 6,097 | py | Python | parsers/test/mocks/quality_check.py | Macquaria/electricitymap-contrib | 86ad87a07e6b1ba38ebf697f9a4710dbc5688754 | [
"MIT"
] | 1,582 | 2018-07-16T10:52:36.000Z | 2021-12-06T06:03:32.000Z | parsers/test/mocks/quality_check.py | aphansal123/electricitymap-contrib | cac8e3abfdebe2d66a832925e6f66d7283ba67b6 | [
"MIT"
] | 1,463 | 2018-07-09T12:23:35.000Z | 2021-12-06T08:11:37.000Z | parsers/test/mocks/quality_check.py | aphansal123/electricitymap-contrib | cac8e3abfdebe2d66a832925e6f66d7283ba67b6 | [
"MIT"
] | 650 | 2018-07-10T02:07:17.000Z | 2021-12-03T11:05:45.000Z | #!/usr/bin/python
"""
Test datapoints for quality.py
Each one is designed to test some part of the validation functions.
"""
import datetime
dt = datetime.datetime.utcnow()
prod = {
'biomass': 15.0,
'coal': 130.0,
'gas': 890.0,
'hydro': 500.0,
'nuclear': 345.7,
'oil': 0.0,
'solar': 60.0,
'wind': 75.0,
'geothermal': None,
'unknown': 3.0
}
c1 = {
'consumption': 1374.0,
'zoneKey': 'FR',
'datetime': dt,
'production': prod,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
c2 = {
'consumption': -1081.0,
'zoneKey': 'FR',
'datetime': dt,
'production': prod,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
c3 = {
'consumption': None,
'zoneKey': 'FR',
'datetime': dt,
'production': prod,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
e1 = {
'sortedZoneKeys': 'DK->NO',
'datetime': dt,
'netFlow': 73.0,
'source': 'mysource.com'
}
e2 = {
'sortedZoneKeys': 'DK->NO',
'netFlow': 73.0,
'source': 'mysource.com'
}
e3 = {
'sortedZoneKeys': 'DK->NO',
'datetime': 'At the 3rd beep the time will be......',
'netFlow': 73.0,
'source': 'mysource.com'
}
future = datetime.datetime.utcnow() + datetime.timedelta(seconds=5*60)
e4 = {
'sortedZoneKeys': 'DK->NO',
'datetime': future,
'netFlow': 73.0,
'source': 'mysource.com'
}
p1 = {
'zoneKey': 'FR',
'production': prod,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p2 = {
'production': prod,
'datetime': dt,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p3 = {
'zoneKey': 'FR',
'production': prod,
'datetime': '13th May 2017',
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p4 = {
'zoneKey': 'BR',
'production': prod,
'datetime': dt,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p5 = {
'zoneKey': 'BR',
'production': prod,
'datetime': future,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p6 = {
'zoneKey': 'FR',
'production': {
'biomass': 10.0,
'coal': None,
'gas': None,
'hydro': 340.2,
'nuclear': 2390.0,
'oil': None,
'solar': 49.0,
'wind': 0.0,
'geothermal': 453.8,
'unknown': None
},
'datetime': dt,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p7 = {
'zoneKey': 'CH',
'production': {
'biomass': 10.0,
'coal': None,
'gas': 780.0,
'hydro': 340.2,
'nuclear': 2390.0,
'oil': None,
'solar': 49.0,
'wind': 0.0,
'geothermal': 453.8,
'unknown': None
},
'datetime': dt,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p8 = {
'zoneKey': 'FR',
'production': {
'biomass': 10.0,
'coal': 230.6,
'gas': 780.0,
'hydro': 340.2,
'nuclear': 2390.0,
'oil': 0.0,
'solar': 49.0,
'wind': 0.0,
'geothermal': -453.8,
'unknown': 0.0
},
'datetime': dt,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p9 = {
'zoneKey': 'FR',
'production': {
'biomass': 10.0,
'coal': 230.6,
'gas': 780.0,
'hydro': 340.2,
'nuclear': 2390.0,
'oil': 0.0,
'solar': 49.0,
'wind': 0.0,
'geothermal': 453.8,
'unknown': 10.0
},
'datetime': dt,
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
p10 = {
'zoneKey': 'DE',
'production': {
'coal': 230.6,
'gas': 780.0,
'hydro': 340.2,
'nuclear': 2390.0,
'oil': 0.0,
'solar': 49.0,
'wind': 0.0,
'geothermal': 453.8,
'unknown': 10.0
},
'datetime': dt,
'source': 'mysource.com'
}
p11 = {
'zoneKey': 'PL',
'production': {
'coal': 230.6,
'gas': 780.0
},
'datetime': dt,
'source': 'mysource.com'
}
p12 = {
'zoneKey': 'SI',
'production': {
'biomass': 15,
'coal': 4000,
'gas': 14,
'geothermal': None,
'hydro': 856,
'nuclear': 692,
'oil': 0,
'solar': 94,
'unknown': None,
'wind': 1
},
'datetime': dt,
'source': 'mysource.com'
}
p13 = {
'zoneKey': 'DK-DK1',
'production': {
'oil': 1,
'unknown': 79,
'coal': 534,
'wind': 2000,
'biomass': 583,
'gas': 215
},
'datetime': dt,
'source': 'entsoe.eu',
}
p14 = {
'zoneKey': 'FI',
'production': {
'nuclear': 2565,
'oil': 1,
'unknown': 79,
'coal': 534,
'hydro': 2176,
'wind': 42,
'biomass': 583,
'gas': 215,
'geothermal': None,
'solar': None
},
'datetime': dt,
'source': 'entsoe.eu',
}
| 20.667797 | 70 | 0.37051 |
b6c51671d563be4a6c7995daaf02f33f84217fd9 | 2,729 | py | Python | tools/cryptoloop2hashcat.py | Masha/hashcat | 178003d692ab72abfd9588fdce9e4f569d2b1aa7 | [
"MIT"
] | 13,663 | 2015-12-04T16:08:29.000Z | 2022-03-31T23:43:17.000Z | tools/cryptoloop2hashcat.py | Masha/hashcat | 178003d692ab72abfd9588fdce9e4f569d2b1aa7 | [
"MIT"
] | 2,014 | 2015-12-04T16:45:36.000Z | 2022-03-31T21:02:58.000Z | tools/cryptoloop2hashcat.py | Masha/hashcat | 178003d692ab72abfd9588fdce9e4f569d2b1aa7 | [
"MIT"
] | 2,555 | 2015-12-04T16:09:31.000Z | 2022-03-31T11:34:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Gabriele 'matrix' Gristina
# Version: 1.0
# Date: Fri May 7 01:02:56 CEST 2021
# License: MIT
import argparse
import os.path
import sys
kern_type = -1
hash_mode = -1
hash_modes = [ 14511, 14512, 14513, 14521, 14522, 14523, 14531, 14532, 14533, 14541, 14542, 14543, 14551, 14552, 14553 ]
def validate_source(parser, arg):
if not os.path.exists(arg):
parser.error("! Invalid source argument: does not exist")
else:
return open(arg, 'rb')
def validate_keysize(parser, ks):
if ks == '128':
return 0
elif ks == '192':
return 1
elif ks == '256':
return 2
else:
parser.error("! Invalid key size selected ...")
def valid_hash_cipher(hash, cipher):
if hash != 'sha1' and hash != 'sha256' and hash != 'sha512' and hash != 'ripemd160' and hash != 'whirlpool':
print("! Invalid or not supported hash type")
exit(1)
if cipher != 'aes' and cipher != 'serpent' and cipher != 'twofish':
print("! Invalid or not supported cipher")
exit(1)
if hash == 'sha1':
if cipher == 'aes':
return 0
elif cipher == 'serpent':
return 1
else:
return 2
elif hash == 'sha256':
if cipher == 'aes':
return 3
        elif cipher == 'serpent':
return 4
else:
return 5
elif hash == 'sha512':
if cipher == 'aes':
return 6
elif cipher == 'serpent':
return 7
else:
return 8
elif hash == 'ripemd160':
if cipher == 'aes':
return 9
elif cipher == 'serpent':
return 10
else:
return 11
else: # whirlpool
if cipher == 'aes':
return 12
elif cipher == 'serpent':
return 13
else:
return 14
parser = argparse.ArgumentParser(description='cryptoloop2hashcat extraction tool')
parser.add_argument('--source', required=True, help='set cryptoloop disk/image from path', type=lambda src: validate_source(parser, src))
parser.add_argument('--hash', required=True, help='set hash type. Supported: sha1, sha256, sha512, ripemd160 or whirlpool.')
parser.add_argument('--cipher', required=True, help='set cipher type. Supported: aes, serpent or twofish.')
parser.add_argument('--keysize', required=True, help='set key size. Supported: 128, 192 or 256.', type=lambda ks: validate_keysize(parser, ks))
args = parser.parse_args()
kern_type = valid_hash_cipher(args.hash, args.cipher)
hash_mode = hash_modes[kern_type]
key_size = args.keysize
f = args.source
f.seek(1536)
if sys.version_info[0] == 3:
ct = f.read(16).hex()
else:
ct = f.read(16).encode('hex')
f.close()
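# Emit the hashcat hash line for the selected cryptoapi mode: kernel id, key size, the fixed
# 03.../00... 16-byte fields expected by these modes, and the 16 ciphertext bytes read at
# offset 1536 (sector 3) of the image.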
print('$cryptoapi$' + str(kern_type) + '$' + str(key_size) + '$03000000000000000000000000000000$00000000000000000000000000000000$' + ct)
| 26.754902 | 143 | 0.648223 |
c8201e26211b2eab4d0c998d38fb57c4a4af9fef | 5,179 | py | Python | invenio_records_resources/resources/files/resource.py | inveniosoftware/invenio-resources | f1fb9a849d03af1d6ec4cddfc4e140a06788783b | [
"MIT"
] | null | null | null | invenio_records_resources/resources/files/resource.py | inveniosoftware/invenio-resources | f1fb9a849d03af1d6ec4cddfc4e140a06788783b | [
"MIT"
] | 19 | 2020-05-18T12:04:54.000Z | 2020-07-13T06:19:27.000Z | invenio_records_resources/resources/files/resource.py | inveniosoftware/invenio-resources | f1fb9a849d03af1d6ec4cddfc4e140a06788783b | [
"MIT"
] | 5 | 2020-04-28T09:07:43.000Z | 2020-07-01T14:43:01.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Invenio Record File Resources."""
import marshmallow as ma
from flask import g
from flask_resources import (
JSONDeserializer,
RequestBodyParser,
Resource,
request_body_parser,
request_parser,
resource_requestctx,
response_handler,
route,
)
from ..errors import ErrorHandlersMixin
from .parser import RequestStreamParser
#
# Decorator helpers
#
request_view_args = request_parser(
{"pid_value": ma.fields.Str(required=True), "key": ma.fields.Str()},
location="view_args",
)
request_data = request_body_parser(
parsers={"application/json": RequestBodyParser(JSONDeserializer())},
default_content_type="application/json",
)
request_stream = request_body_parser(
parsers={"application/octet-stream": RequestStreamParser()},
default_content_type="application/octet-stream",
)
#
# Resource
#
class FileResource(ErrorHandlersMixin, Resource):
"""File resource."""
def __init__(self, config, service):
"""Constructor."""
super(FileResource, self).__init__(config)
self.service = service
def create_url_rules(self):
"""Routing for the views."""
routes = self.config.routes
url_rules = [
route("GET", routes["list"], self.search),
route("GET", routes["item"], self.read),
route("GET", routes["item-content"], self.read_content),
]
if self.config.allow_upload:
url_rules += [
route("POST", routes["list"], self.create),
route("DELETE", routes["list"], self.delete_all),
route("PUT", routes["item"], self.update),
route("DELETE", routes["item"], self.delete),
route("POST", routes["item-commit"], self.create_commit),
route("PUT", routes["item-content"], self.update_content),
]
return url_rules
@request_view_args
@response_handler(many=True)
def search(self):
"""List files."""
files = self.service.list_files(
g.identity,
resource_requestctx.view_args["pid_value"],
)
return files.to_dict(), 200
@request_view_args
def delete_all(self):
"""Delete all files."""
self.service.delete_all_files(
g.identity,
resource_requestctx.view_args["pid_value"],
)
return "", 204
@request_view_args
@request_data
@response_handler()
def create(self):
"""Initialize an upload on a record."""
item = self.service.init_files(
g.identity,
resource_requestctx.view_args["pid_value"],
resource_requestctx.data or [],
)
return item.to_dict(), 201
@request_view_args
@response_handler()
def read(self):
"""Read a single file."""
item = self.service.read_file_metadata(
g.identity,
resource_requestctx.view_args["pid_value"],
resource_requestctx.view_args["key"],
)
return item.to_dict(), 200
@request_view_args
@request_data
@response_handler()
def update(self):
"""Update the metadata a single file."""
item = self.service.update_file_metadata(
g.identity,
resource_requestctx.view_args["pid_value"],
resource_requestctx.view_args["key"],
resource_requestctx.data or {},
)
return item.to_dict(), 200
@request_view_args
def delete(self):
"""Delete a file."""
self.service.delete_file(
g.identity,
resource_requestctx.view_args["pid_value"],
resource_requestctx.view_args["key"],
)
return "", 204
@request_view_args
@response_handler()
def create_commit(self):
"""Commit a file."""
item = self.service.commit_file(
g.identity,
resource_requestctx.view_args["pid_value"],
resource_requestctx.view_args["key"],
)
return item.to_dict(), 200
@request_view_args
def read_content(self):
"""Read file content."""
item = self.service.get_file_content(
g.identity,
resource_requestctx.view_args["pid_value"],
resource_requestctx.view_args["key"],
)
return item.send_file(), 200
@request_view_args
@request_stream
@response_handler()
def update_content(self):
"""Upload file content."""
# TODO: Parse in `resource_requestctx`
item = self.service.set_file_content(
g.identity,
resource_requestctx.view_args["pid_value"],
resource_requestctx.view_args["key"],
resource_requestctx.data["request_stream"],
content_length=resource_requestctx.data["request_content_length"],
)
return item.to_dict(), 200
| 28.932961 | 78 | 0.611701 |
fb08cdc0e4da2276295a3e110aa1c6adec19343e | 2,062 | py | Python | pyecca/estimators/attitude/launch.py | Mohit-Pathak/pyecca | d47d16aae646d8cab633a6d792233f676ed6c61c | [
"BSD-3-Clause"
] | null | null | null | pyecca/estimators/attitude/launch.py | Mohit-Pathak/pyecca | d47d16aae646d8cab633a6d792233f676ed6c61c | [
"BSD-3-Clause"
] | null | null | null | pyecca/estimators/attitude/launch.py | Mohit-Pathak/pyecca | d47d16aae646d8cab633a6d792233f676ed6c61c | [
"BSD-3-Clause"
] | 2 | 2019-09-25T03:20:15.000Z | 2019-10-05T02:11:40.000Z | import multiprocessing as mp
import numpy as np
from pyecca import replay
from pyecca import uros
from pyecca.estimators.attitude import algorithms
from pyecca.estimators.attitude.estimator import AttitudeEstimator
from pyecca.estimators.attitude.simulator import Simulator
default_params = {
't0': 0,
'tf': 1,
'n_monte_carlo': 1,
'replay_log_file': None,
'name': 'default',
'initialize': True,
'estimators': [],
'x0': [0, 0, 0, 0, 0, 0],
'params': {}
}
eqs = algorithms.eqs()
def init_params(params):
p = dict(default_params)
for k, v in params.items():
if k not in p.keys():
raise KeyError(k)
p[k] = v
return p
def launch_sim(params):
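    """Run a single attitude-estimation simulation until params['tf'] and return the logged data as an array."""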
p = init_params(params)
core = uros.Core()
Simulator(core, eqs, p['x0'])
for name in p['estimators']:
AttitudeEstimator(core, name, eqs[name], p['initialize'])
logger = uros.Logger(core)
core.init_params()
for k, v in p['params'].items():
core.set_param(k, v)
core.run(until=p['tf'])
print(p['name'], 'done')
return logger.get_log_as_array()
def launch_monte_carlo_sim(params):
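    """Run n_monte_carlo simulations (in a multiprocessing pool when more than one) and return the collected logs."""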
p = init_params(params)
if p['n_monte_carlo'] == 1:
d = dict(p)
d.pop('n_monte_carlo')
data = [launch_sim(d)]
else:
new_params = []
for i in range(p['n_monte_carlo']):
d = dict(p)
d.pop('n_monte_carlo')
d['name'] = i
new_params.append(d)
with mp.Pool(mp.cpu_count()) as pool:
data = np.array(pool.map(launch_sim, new_params))
return data
def launch_replay(params):
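    """Replay a ULog file through the configured attitude estimators and return the logged data."""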
p = init_params(params)
core = uros.Core()
replay.ULogReplay(core, p['replay_log_file'])
for name in p['estimators']:
AttitudeEstimator(core, name, eqs[name], p['initialize'])
logger = uros.Logger(core)
core.init_params()
for k, v in p['params'].items():
core.set_param(k, v)
core.run(until=p['tf'])
print(p['name'], 'done')
return logger.get_log_as_array()
| 25.45679 | 66 | 0.605238 |
6065bb07c704e332a5222a70eb24f4405dd3e5b4 | 28,674 | py | Python | nltk/parse/nonprojectivedependencyparser.py | sgkasselau/nltk | 6f18915d88f9bcaff74f5578633c81ef3b73e7f4 | [
"Apache-2.0"
] | null | null | null | nltk/parse/nonprojectivedependencyparser.py | sgkasselau/nltk | 6f18915d88f9bcaff74f5578633c81ef3b73e7f4 | [
"Apache-2.0"
] | null | null | null | nltk/parse/nonprojectivedependencyparser.py | sgkasselau/nltk | 6f18915d88f9bcaff74f5578633c81ef3b73e7f4 | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2022 NLTK Project
# Author: Jason Narad <jason.narad@gmail.com>
#
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
#
import logging
import math
from nltk.parse.dependencygraph import DependencyGraph
logger = logging.getLogger(__name__)
#################################################################
# DependencyScorerI - Interface for Graph-Edge Weight Calculation
#################################################################
class DependencyScorerI:
"""
A scorer for calculated the weights on the edges of a weighted
dependency graph. This is used by a
``ProbabilisticNonprojectiveParser`` to initialize the edge
weights of a ``DependencyGraph``. While typically this would be done
by training a binary classifier, any class that can return a
multidimensional list representation of the edge weights can
implement this interface. As such, it has no necessary
fields.
"""
def __init__(self):
if self.__class__ == DependencyScorerI:
raise TypeError("DependencyScorerI is an abstract interface")
def train(self, graphs):
"""
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
Typically the edges present in the graphs can be used as
positive training examples, and the edges not present as negative
examples.
"""
raise NotImplementedError()
def score(self, graph):
"""
:type graph: DependencyGraph
:param graph: A dependency graph whose set of edges need to be
scored.
:rtype: A three-dimensional list of numbers.
:return: The score is returned in a multidimensional(3) list, such
that the outer-dimension refers to the head, and the
inner-dimension refers to the dependencies. For instance,
scores[0][1] would reference the list of scores corresponding to
arcs from node 0 to node 1. The node's 'address' field can be used
to determine its number identification.
For further illustration, a score list corresponding to Fig.2 of
Keith Hall's 'K-best Spanning Tree Parsing' paper::
scores = [[[], [5], [1], [1]],
[[], [], [11], [4]],
[[], [10], [], [5]],
[[], [8], [8], []]]
When used in conjunction with a MaxEntClassifier, each score would
correspond to the confidence of a particular edge being classified
with the positive training examples.
"""
raise NotImplementedError()
#################################################################
# NaiveBayesDependencyScorer
#################################################################
class NaiveBayesDependencyScorer(DependencyScorerI):
"""
    A dependency scorer built around a probabilistic classifier. In this
particular class that classifier is a ``NaiveBayesClassifier``.
It uses head-word, head-tag, child-word, and child-tag features
for classification.
>>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2
>>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry]
>>> npp = ProbabilisticNonprojectiveParser()
>>> npp.train(graphs, NaiveBayesDependencyScorer())
>>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])
>>> len(list(parses))
1
"""
def __init__(self):
pass # Do nothing without throwing error
def train(self, graphs):
"""
Trains a ``NaiveBayesClassifier`` using the edges present in
graphs list as positive examples, the edges not present as
negative examples. Uses a feature vector of head-word,
head-tag, child-word, and child-tag.
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
"""
from nltk.classify import NaiveBayesClassifier
        # Create labeled training examples
labeled_examples = []
for graph in graphs:
for head_node in graph.nodes.values():
for child_index, child_node in graph.nodes.items():
if child_index in head_node["deps"]:
label = "T"
else:
label = "F"
labeled_examples.append(
(
dict(
a=head_node["word"],
b=head_node["tag"],
c=child_node["word"],
d=child_node["tag"],
),
label,
)
)
self.classifier = NaiveBayesClassifier.train(labeled_examples)
def score(self, graph):
"""
Converts the graph into a feature-based representation of
each edge, and then assigns a score to each based on the
confidence of the classifier in assigning it to the
positive label. Scores are returned in a multidimensional list.
:type graph: DependencyGraph
:param graph: A dependency graph to score.
:rtype: 3 dimensional list
:return: Edge scores for the graph parameter.
"""
# Convert graph to feature representation
edges = []
for head_node in graph.nodes.values():
for child_node in graph.nodes.values():
edges.append(
dict(
a=head_node["word"],
b=head_node["tag"],
c=child_node["word"],
d=child_node["tag"],
)
)
# Score edges
edge_scores = []
row = []
count = 0
for pdist in self.classifier.prob_classify_many(edges):
logger.debug("%.4f %.4f", pdist.prob("T"), pdist.prob("F"))
# smoothing in case the probability = 0
row.append([math.log(pdist.prob("T") + 0.00000000001)])
count += 1
if count == len(graph.nodes):
edge_scores.append(row)
row = []
count = 0
return edge_scores
#################################################################
# A Scorer for Demo Purposes
#################################################################
# A short class necessary to show parsing example from paper
class DemoScorer(DependencyScorerI):
def train(self, graphs):
print("Training...")
def score(self, graph):
# scores for Keith Hall 'K-best Spanning Tree Parsing' paper
return [
[[], [5], [1], [1]],
[[], [], [11], [4]],
[[], [10], [], [5]],
[[], [8], [8], []],
]
#################################################################
# Non-Projective Probabilistic Parsing
#################################################################
class ProbabilisticNonprojectiveParser:
"""A probabilistic non-projective dependency parser.
    Nonprojective dependencies allow for "crossing branches" in the parse tree
which is necessary for representing particular linguistic phenomena, or even
typical parses in some languages. This parser follows the MST parsing
algorithm, outlined in McDonald(2005), which likens the search for the best
non-projective parse to finding the maximum spanning tree in a weighted
directed graph.
>>> class Scorer(DependencyScorerI):
... def train(self, graphs):
... pass
...
... def score(self, graph):
... return [
... [[], [5], [1], [1]],
... [[], [], [11], [4]],
... [[], [10], [], [5]],
... [[], [8], [8], []],
... ]
>>> npp = ProbabilisticNonprojectiveParser()
>>> npp.train([], Scorer())
>>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None])
>>> len(list(parses))
1
Rule based example
>>> from nltk.grammar import DependencyGrammar
>>> grammar = DependencyGrammar.fromstring('''
... 'taught' -> 'play' | 'man'
... 'man' -> 'the' | 'in'
... 'in' -> 'corner'
... 'corner' -> 'the'
... 'play' -> 'golf' | 'dachshund' | 'to'
... 'dachshund' -> 'his'
... ''')
>>> ndp = NonprojectiveDependencyParser(grammar)
>>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
>>> len(list(parses))
4
"""
def __init__(self):
"""
Creates a new non-projective parser.
"""
logging.debug("initializing prob. nonprojective...")
def train(self, graphs, dependency_scorer):
"""
Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects,
and establishes this as the parser's scorer. This is used to
initialize the scores on a ``DependencyGraph`` during the parsing
procedure.
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
:type dependency_scorer: DependencyScorerI
:param dependency_scorer: A scorer which implements the
``DependencyScorerI`` interface.
"""
self._scorer = dependency_scorer
self._scorer.train(graphs)
def initialize_edge_scores(self, graph):
"""
Assigns a score to every edge in the ``DependencyGraph`` graph.
These scores are generated via the parser's scorer which
was assigned during the training process.
:type graph: DependencyGraph
:param graph: A dependency graph to assign scores to.
"""
self.scores = self._scorer.score(graph)
def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph):
"""
Takes a list of nodes that have been identified to belong to a cycle,
        and collapses them into one larger node. The arcs of all nodes in
the graph must be updated to account for this.
:type new_node: Node.
:param new_node: A Node (Dictionary) to collapse the cycle nodes into.
:type cycle_path: A list of integers.
:param cycle_path: A list of node addresses, each of which is in the cycle.
:type g_graph, b_graph, c_graph: DependencyGraph
:param g_graph, b_graph, c_graph: Graphs which need to be updated.
"""
logger.debug("Collapsing nodes...")
# Collapse all cycle nodes into v_n+1 in G_Graph
for cycle_node_index in cycle_path:
g_graph.remove_by_address(cycle_node_index)
g_graph.add_node(new_node)
g_graph.redirect_arcs(cycle_path, new_node["address"])
def update_edge_scores(self, new_node, cycle_path):
"""
Updates the edge scores to reflect a collapse operation into
new_node.
:type new_node: A Node.
:param new_node: The node which cycle nodes are collapsed into.
:type cycle_path: A list of integers.
:param cycle_path: A list of node addresses that belong to the cycle.
"""
logger.debug("cycle %s", cycle_path)
cycle_path = self.compute_original_indexes(cycle_path)
logger.debug("old cycle %s", cycle_path)
logger.debug("Prior to update: %s", self.scores)
for i, row in enumerate(self.scores):
for j, column in enumerate(self.scores[i]):
logger.debug(self.scores[i][j])
if j in cycle_path and i not in cycle_path and self.scores[i][j]:
subtract_val = self.compute_max_subtract_score(j, cycle_path)
logger.debug("%s - %s", self.scores[i][j], subtract_val)
new_vals = []
for cur_val in self.scores[i][j]:
new_vals.append(cur_val - subtract_val)
self.scores[i][j] = new_vals
for i, row in enumerate(self.scores):
for j, cell in enumerate(self.scores[i]):
if i in cycle_path and j in cycle_path:
self.scores[i][j] = []
logger.debug("After update: %s", self.scores)
def compute_original_indexes(self, new_indexes):
"""
As nodes are collapsed into others, they are replaced
by the new node in the graph, but it's still necessary
to keep track of what these original nodes were. This
takes a list of node addresses and replaces any collapsed
node addresses with their original addresses.
:type new_indexes: A list of integers.
:param new_indexes: A list of node addresses to check for
subsumed nodes.
"""
swapped = True
while swapped:
originals = []
swapped = False
for new_index in new_indexes:
if new_index in self.inner_nodes:
for old_val in self.inner_nodes[new_index]:
if old_val not in originals:
originals.append(old_val)
swapped = True
else:
originals.append(new_index)
new_indexes = originals
return new_indexes
def compute_max_subtract_score(self, column_index, cycle_indexes):
"""
When updating scores the score of the highest-weighted incoming
arc is subtracted upon collapse. This returns the correct
amount to subtract from that edge.
:type column_index: integer.
        :param column_index: An index representing the column of incoming arcs
to a particular node being updated
:type cycle_indexes: A list of integers.
:param cycle_indexes: Only arcs from cycle nodes are considered. This
          is a list of the addresses of such nodes.
"""
max_score = -100000
for row_index in cycle_indexes:
for subtract_val in self.scores[row_index][column_index]:
if subtract_val > max_score:
max_score = subtract_val
return max_score
def best_incoming_arc(self, node_index):
"""
Returns the source of the best incoming arc to the
node with address: node_index
:type node_index: integer.
:param node_index: The address of the 'destination' node,
the node that is arced to.
"""
originals = self.compute_original_indexes([node_index])
logger.debug("originals: %s", originals)
max_arc = None
max_score = None
for row_index in range(len(self.scores)):
for col_index in range(len(self.scores[row_index])):
if col_index in originals and (
max_score is None or self.scores[row_index][col_index] > max_score
):
max_score = self.scores[row_index][col_index]
max_arc = row_index
logger.debug("%s, %s", row_index, col_index)
logger.debug(max_score)
for key in self.inner_nodes:
replaced_nodes = self.inner_nodes[key]
if max_arc in replaced_nodes:
return key
return max_arc
def original_best_arc(self, node_index):
originals = self.compute_original_indexes([node_index])
max_arc = None
max_score = None
max_orig = None
for row_index in range(len(self.scores)):
for col_index in range(len(self.scores[row_index])):
if col_index in originals and (
max_score is None or self.scores[row_index][col_index] > max_score
):
max_score = self.scores[row_index][col_index]
max_arc = row_index
max_orig = col_index
return [max_arc, max_orig]
def parse(self, tokens, tags):
"""
Parses a list of tokens in accordance to the MST parsing algorithm
for non-projective dependency parses. Assumes that the tokens to
be parsed have already been tagged and those tags are provided. Various
scoring methods can be used by implementing the ``DependencyScorerI``
interface and passing it to the training algorithm.
:type tokens: list(str)
:param tokens: A list of words or punctuation to be parsed.
:type tags: list(str)
:param tags: A list of tags corresponding by index to the words in the tokens list.
:return: An iterator of non-projective parses.
:rtype: iter(DependencyGraph)
"""
self.inner_nodes = {}
# Initialize g_graph
g_graph = DependencyGraph()
for index, token in enumerate(tokens):
g_graph.nodes[index + 1].update(
{"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
)
# Fully connect non-root nodes in g_graph
g_graph.connect_graph()
original_graph = DependencyGraph()
for index, token in enumerate(tokens):
original_graph.nodes[index + 1].update(
{"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
)
b_graph = DependencyGraph()
c_graph = DependencyGraph()
for index, token in enumerate(tokens):
c_graph.nodes[index + 1].update(
{"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
)
# Assign initial scores to g_graph edges
self.initialize_edge_scores(g_graph)
logger.debug(self.scores)
# Initialize a list of unvisited vertices (by node address)
unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()]
# Iterate over unvisited vertices
nr_vertices = len(tokens)
betas = {}
while unvisited_vertices:
# Mark current node as visited
current_vertex = unvisited_vertices.pop(0)
logger.debug("current_vertex: %s", current_vertex)
# Get corresponding node n_i to vertex v_i
current_node = g_graph.get_by_address(current_vertex)
logger.debug("current_node: %s", current_node)
# Get best in-edge node b for current node
best_in_edge = self.best_incoming_arc(current_vertex)
betas[current_vertex] = self.original_best_arc(current_vertex)
logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex)
# b_graph = Union(b_graph, b)
for new_vertex in [current_vertex, best_in_edge]:
b_graph.nodes[new_vertex].update(
{"word": "TEMP", "rel": "NTOP", "address": new_vertex}
)
b_graph.add_arc(best_in_edge, current_vertex)
# Beta(current node) = b - stored for parse recovery
# If b_graph contains a cycle, collapse it
cycle_path = b_graph.contains_cycle()
if cycle_path:
# Create a new node v_n+1 with address = len(nodes) + 1
new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1}
# c_graph = Union(c_graph, v_n+1)
c_graph.add_node(new_node)
# Collapse all nodes in cycle C into v_n+1
self.update_edge_scores(new_node, cycle_path)
self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph)
for cycle_index in cycle_path:
c_graph.add_arc(new_node["address"], cycle_index)
# self.replaced_by[cycle_index] = new_node['address']
self.inner_nodes[new_node["address"]] = cycle_path
# Add v_n+1 to list of unvisited vertices
unvisited_vertices.insert(0, nr_vertices + 1)
# increment # of nodes counter
nr_vertices += 1
# Remove cycle nodes from b_graph; B = B - cycle c
for cycle_node_address in cycle_path:
b_graph.remove_by_address(cycle_node_address)
logger.debug("g_graph: %s", g_graph)
logger.debug("b_graph: %s", b_graph)
logger.debug("c_graph: %s", c_graph)
logger.debug("Betas: %s", betas)
logger.debug("replaced nodes %s", self.inner_nodes)
# Recover parse tree
logger.debug("Final scores: %s", self.scores)
logger.debug("Recovering parse...")
for i in range(len(tokens) + 1, nr_vertices + 1):
betas[betas[i][1]] = betas[i]
logger.debug("Betas: %s", betas)
for node in original_graph.nodes.values():
            # TODO: It's dangerous to assume that deps is a dictionary
# because it's a default dictionary. Ideally, here we should not
# be concerned how dependencies are stored inside of a dependency
# graph.
node["deps"] = {}
for i in range(1, len(tokens) + 1):
original_graph.add_arc(betas[i][0], betas[i][1])
logger.debug("Done.")
yield original_graph
#################################################################
# Rule-based Non-Projective Parser
#################################################################
class NonprojectiveDependencyParser:
"""
A non-projective, rule-based, dependency parser. This parser
will return the set of all possible non-projective parses based on
the word-to-word relations defined in the parser's dependency
grammar, and will allow the branches of the parse tree to cross
in order to capture a variety of linguistic phenomena that a
projective parser will not.
"""
def __init__(self, dependency_grammar):
"""
Creates a new ``NonprojectiveDependencyParser``.
:param dependency_grammar: a grammar of word-to-word relations.
:type dependency_grammar: DependencyGrammar
"""
self._grammar = dependency_grammar
def parse(self, tokens):
"""
Parses the input tokens with respect to the parser's grammar. Parsing
is accomplished by representing the search-space of possible parses as
a fully-connected directed graph. Arcs that would lead to ungrammatical
parses are removed and a lattice is constructed of length n, where n is
the number of input tokens, to represent all possible grammatical
traversals. All possible paths through the lattice are then enumerated
to produce the set of non-projective parses.
        :param tokens: A list of tokens to parse.
        :type tokens: list(str)
        :return: An iterator of non-projective parses.
        :rtype: iter(DependencyGraph)
"""
# Create graph representation of tokens
self._graph = DependencyGraph()
for index, token in enumerate(tokens):
self._graph.nodes[index] = {
"word": token,
"deps": [],
"rel": "NTOP",
"address": index,
}
for head_node in self._graph.nodes.values():
deps = []
for dep_node in self._graph.nodes.values():
if (
self._grammar.contains(head_node["word"], dep_node["word"])
and head_node["word"] != dep_node["word"]
):
deps.append(dep_node["address"])
head_node["deps"] = deps
# Create lattice of possible heads
roots = []
possible_heads = []
for i, word in enumerate(tokens):
heads = []
for j, head in enumerate(tokens):
if (i != j) and self._grammar.contains(head, word):
heads.append(j)
if len(heads) == 0:
roots.append(i)
possible_heads.append(heads)
# Set roots to attempt
if len(roots) < 2:
if len(roots) == 0:
for i in range(len(tokens)):
roots.append(i)
# Traverse lattice
analyses = []
for _ in roots:
stack = []
analysis = [[] for i in range(len(possible_heads))]
i = 0
forward = True
while i >= 0:
if forward:
if len(possible_heads[i]) == 1:
analysis[i] = possible_heads[i][0]
elif len(possible_heads[i]) == 0:
analysis[i] = -1
else:
head = possible_heads[i].pop()
analysis[i] = head
stack.append([i, head])
if not forward:
index_on_stack = False
for stack_item in stack:
if stack_item[0] == i:
index_on_stack = True
orig_length = len(possible_heads[i])
if index_on_stack and orig_length == 0:
for j in range(len(stack) - 1, -1, -1):
stack_item = stack[j]
if stack_item[0] == i:
possible_heads[i].append(stack.pop(j)[1])
elif index_on_stack and orig_length > 0:
head = possible_heads[i].pop()
analysis[i] = head
stack.append([i, head])
forward = True
if i + 1 == len(possible_heads):
analyses.append(analysis[:])
forward = False
if forward:
i += 1
else:
i -= 1
# Filter parses
# ensure 1 root, every thing has 1 head
for analysis in analyses:
if analysis.count(-1) > 1:
# there are several root elements!
continue
graph = DependencyGraph()
graph.root = graph.nodes[analysis.index(-1) + 1]
for address, (token, head_index) in enumerate(
zip(tokens, analysis), start=1
):
head_address = head_index + 1
node = graph.nodes[address]
node.update({"word": token, "address": address})
if head_address == 0:
rel = "ROOT"
else:
rel = ""
graph.nodes[head_index + 1]["deps"][rel].append(address)
# TODO: check for cycles
yield graph
#################################################################
# Demos
#################################################################
def demo():
# hall_demo()
nonprojective_conll_parse_demo()
rule_based_demo()
def hall_demo():
npp = ProbabilisticNonprojectiveParser()
npp.train([], DemoScorer())
for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]):
print(parse_graph)
def nonprojective_conll_parse_demo():
from nltk.parse.dependencygraph import conll_data2
graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
npp = ProbabilisticNonprojectiveParser()
npp.train(graphs, NaiveBayesDependencyScorer())
for parse_graph in npp.parse(
["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"]
):
print(parse_graph)
def rule_based_demo():
from nltk.grammar import DependencyGrammar
grammar = DependencyGrammar.fromstring(
"""
'taught' -> 'play' | 'man'
'man' -> 'the' | 'in'
'in' -> 'corner'
'corner' -> 'the'
'play' -> 'golf' | 'dachshund' | 'to'
'dachshund' -> 'his'
"""
)
print(grammar)
ndp = NonprojectiveDependencyParser(grammar)
graphs = ndp.parse(
[
"the",
"man",
"in",
"the",
"corner",
"taught",
"his",
"dachshund",
"to",
"play",
"golf",
]
)
print("Graphs:")
for graph in graphs:
print(graph)
if __name__ == "__main__":
demo()
| 37.094437 | 117 | 0.54879 |
30eb012ea52fc2fb19cd778404a233edf5425f7b | 396 | py | Python | employees/migrations/0002_employee_featured_image.py | mwororokevin/django-property-site | a477d80cbac04fde33920d9b5e5f9de783d55fbd | [
"MIT"
] | null | null | null | employees/migrations/0002_employee_featured_image.py | mwororokevin/django-property-site | a477d80cbac04fde33920d9b5e5f9de783d55fbd | [
"MIT"
] | null | null | null | employees/migrations/0002_employee_featured_image.py | mwororokevin/django-property-site | a477d80cbac04fde33920d9b5e5f9de783d55fbd | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-01 09:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employees', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='employee',
name='featured_image',
field=models.ImageField(null=True, upload_to=''),
),
]
| 20.842105 | 61 | 0.59596 |
95b49901db6d16f85a38b1b2c648e1369a95da31 | 1,040 | py | Python | main/entities/wall.py | ouriquegustavo/fightyourstreamer | a9bd3bd7b34817842b0bf0714bd8dc0d3b51b8ba | [
"MIT"
] | null | null | null | main/entities/wall.py | ouriquegustavo/fightyourstreamer | a9bd3bd7b34817842b0bf0714bd8dc0d3b51b8ba | [
"MIT"
] | null | null | null | main/entities/wall.py | ouriquegustavo/fightyourstreamer | a9bd3bd7b34817842b0bf0714bd8dc0d3b51b8ba | [
"MIT"
] | null | null | null | from main.entities.entity import Entity
import pygame
class Wall(Entity):
@property
def x(self):
return (self.xi + self.xf) / 2
@property
def y(self):
return (self.yi + self.yf) / 2
@property
def dx(self):
return self.xf - self.xi
@property
def dy(self):
return self.yf - self.yi
def __init__(self, game, gid, xi, xf, yi, yf):
super().__init__(game, gid)
self.kind = 'wall'
self.xi = xi
self.yi = yi
self.xf = xf
self.yf = yf
self.collision_mask.add(1)
self.collision_mask.add(2)
self.zorder = 1
self.colour = (0, 0, 0, 255)
self.sprite = pygame.Surface((self.dx, self.dy), flags=pygame.SRCALPHA)
self.sprite.fill(self.colour)
self.is_drawing = True
def draw(self):
x = self.xi - self.game.camera.x + self.game.display.w / 2
y = self.yi - self.game.camera.y + self.game.display.h / 2
self.game.display.blit(self.sprite, (x, y))
| 22.608696 | 79 | 0.559615 |
9699cf08765c1189b0c9f59597ec525dfa9f8d71 | 10,891 | py | Python | h5sparse/h5sparse.py | tvandera/h5sparse | aba5694ced25953fb5474d2395a03498e22b0db6 | [
"MIT"
] | 1 | 2021-12-18T18:41:43.000Z | 2021-12-18T18:41:43.000Z | h5sparse/h5sparse.py | tvandera/h5sparse | aba5694ced25953fb5474d2395a03498e22b0db6 | [
"MIT"
] | null | null | null | h5sparse/h5sparse.py | tvandera/h5sparse | aba5694ced25953fb5474d2395a03498e22b0db6 | [
"MIT"
] | null | null | null | import six
import h5py
import numpy as np
import scipy.sparse as ss
FORMAT_DICT = {
'csr': ss.csr_matrix,
'csc': ss.csc_matrix,
'coo': ss.coo_matrix,
}
indptr_dtype = np.int64
indices_dtype = np.int64
row_dtype = np.int64
col_dtype = np.int64
def get_format_str(data):
for format_str, format_class in six.viewitems(FORMAT_DICT):
if isinstance(data, format_class):
return format_str
raise ValueError("Data type {} is not supported.".format(type(data)))
def get_format_class(format_str):
try:
format_class = FORMAT_DICT[format_str]
except KeyError:
raise ValueError("Format {} is not supported."
.format(format_str))
return format_class
def is_compressed_format(format_str):
return format_str in ('csc', 'csr')
class Group(h5py.Group):
"""The HDF5 group that can detect and create sparse matrix.
"""
def __getitem__(self, key):
h5py_item = super(Group, self).__getitem__(key)
if isinstance(h5py_item, h5py.Group):
if 'h5sparse_format' in h5py_item.attrs:
# detect the sparse matrix
return Dataset(h5py_item)
else:
return Group(h5py_item.id)
elif isinstance(h5py_item, h5py.Dataset):
return h5py_item
else:
raise ValueError("Unexpected item type.")
def create_dataset_compressed(self, name, sparse_format, shape, data, indices, indptr,
dtype, **kwargs):
"""Create a dataset in csc or csr format"""
assert sparse_format in ("csc", "csr")
group = self.create_group(name)
group.attrs['h5sparse_format'] = sparse_format
group.attrs['h5sparse_shape'] = shape
group.create_dataset('data', data=data, dtype=dtype, **kwargs)
group.create_dataset('indices', data=indices, dtype=indices_dtype, **kwargs)
group.create_dataset('indptr', data=indptr, dtype=indptr_dtype, **kwargs)
return group
def create_dataset_coo(self, name, sparse_format, shape, data, row, col,
dtype, **kwargs):
"""Create a dataset in csc or csr format"""
assert sparse_format == "coo"
group = self.create_group(name)
group.attrs['h5sparse_format'] = sparse_format
group.attrs['h5sparse_shape'] = shape
group.create_dataset('data', data=data, dtype=dtype, **kwargs)
group.create_dataset('row', data=row, dtype=row_dtype, **kwargs)
group.create_dataset('col', data=col, dtype=col_dtype, **kwargs)
return group
def create_dataset_from_dataset(self, name, data, dtype, **kwargs):
sparse_format = data.attrs['h5sparse_format']
if (is_compressed_format(sparse_format)):
group = self.create_dataset_compressed(name,
data.attrs['h5sparse_format'],
data.attrs['h5sparse_shape'],
data.h5py_group['data'],
data.h5py_group['indices'],
data.h5py_group['indptr'],
dtype,
**kwargs)
else:
group = self.create_dataset_coo(name,
data.attrs['h5sparse_format'],
data.attrs['h5sparse_shape'],
data.h5py_group['data'],
data.h5py_group['row'],
data.h5py_group['col'],
dtype,
**kwargs)
return group
def create_dataset_from_scipy(self, name, data, dtype, **kwargs):
sparse_format = get_format_str(data)
if (is_compressed_format(sparse_format)):
group = self.create_dataset_compressed(name,
sparse_format,
data.shape,
data.data,
data.indices,
data.indptr,
dtype,
**kwargs)
else:
group = self.create_dataset_coo(name,
sparse_format,
data.shape,
data.data,
data.row,
data.col,
dtype,
**kwargs)
return group
def create_dataset(self, name, shape=None, dtype=None, data=None,
sparse_format=None, **kwargs):
"""Create 3 datasets in a group to represent the sparse array.
Parameters
----------
sparse_format:
"""
if isinstance(data, Dataset):
assert sparse_format is None
group = self.create_dataset_from_dataset(name, data, dtype, **kwargs)
elif ss.issparse(data):
if sparse_format is not None:
format_class = get_format_class(sparse_format)
data = format_class(data)
group = self.create_dataset_from_scipy(name,
data,
dtype,
**kwargs)
elif data is None and sparse_format is not None:
format_class = get_format_class(sparse_format)
if dtype is None:
dtype = np.float64
if shape is None:
shape = (0, 0)
data = format_class(shape, dtype=dtype)
group = self.create_dataset_from_scipy(name,
data,
dtype,
**kwargs)
else:
# forward the arguments to h5py
assert sparse_format is None
return super(Group, self).create_dataset(
name, data=data, shape=shape, dtype=dtype, **kwargs)
return Dataset(group)
class File(h5py.File, Group):
"""The HDF5 file object that can detect and create sparse matrix.
"""
pass
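# The helper below is an added usage sketch, not part of the original library;
# the file name and matrix values are made up for illustration only.
def _example_roundtrip(path='example.h5'):
    """Write a scipy CSR matrix through ``File`` and read it back."""
    sp_matrix = ss.csr_matrix(np.array([[0.0, 1.0], [2.0, 0.0]]))
    with File(path, 'w') as h5f:
        h5f.create_dataset('matrix', data=sp_matrix)
    with File(path, 'r') as h5f:
        # Indexing with () returns the whole matrix as a scipy sparse object.
        return h5f['matrix'][()]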
class Dataset(h5py.Group):
"""The HDF5 sparse matrix dataset.
Parameters
----------
h5py_group : h5py.Dataset
"""
def __init__(self, h5py_group):
super(Dataset, self).__init__(h5py_group.id)
self.h5py_group = h5py_group
self.shape = tuple(self.attrs['h5sparse_shape'])
self.format_str = self.attrs['h5sparse_format']
self.dtype = h5py_group['data'].dtype
def __getitem__(self, key):
if isinstance(key, slice):
if key.step is not None:
raise NotImplementedError("Index step is not supported.")
start = key.start
stop = key.stop
if stop is not None and stop > 0:
stop += 1
if start is not None and start < 0:
start -= 1
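            # 'indptr' holds one more boundary than there are compressed rows/columns,
            # so the slice bounds are widened by one entry above to keep the closing
            # boundary of the requested range.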
indptr_slice = slice(start, stop)
indptr = self.h5py_group['indptr'][indptr_slice]
data = self.h5py_group['data'][indptr[0]:indptr[-1]]
indices = self.h5py_group['indices'][indptr[0]:indptr[-1]]
indptr -= indptr[0]
if self.format_str == 'csr':
shape = (indptr.size - 1, self.shape[1])
elif self.format_str == 'csc':
shape = (self.shape[0], indptr.size - 1)
else:
raise NotImplementedError("Slicing for format {} is not implemented."
.format(self.format_str))
format_class = get_format_class(self.attrs['h5sparse_format'])
return format_class((data, indices, indptr), shape=shape)
elif isinstance(key, tuple) and key == ():
if (is_compressed_format(self.format_str)):
data = self.h5py_group['data'][()]
indices = self.h5py_group['indices'][()]
indptr = self.h5py_group['indptr'][()]
shape = self.shape
format_class = get_format_class(self.attrs['h5sparse_format'])
return format_class((data, indices, indptr), shape=shape)
else:
data = self.h5py_group['data'][()]
row = self.h5py_group['row'][()]
col = self.h5py_group['col'][()]
shape = self.shape
format_class = get_format_class(self.attrs['h5sparse_format'])
return format_class((data, (row, col)), shape=shape)
else:
raise NotImplementedError("Only support one slice as index.")
@property
def value(self):
return self[()]
def append(self, sparse_matrix):
if self.format_str != get_format_str(sparse_matrix):
raise ValueError("Format not the same.")
if self.format_str == 'csr':
# data
data = self.h5py_group['data']
orig_data_size = data.shape[0]
new_shape = (orig_data_size + sparse_matrix.data.shape[0],)
data.resize(new_shape)
data[orig_data_size:] = sparse_matrix.data
# indptr
indptr = self.h5py_group['indptr']
orig_data_size = indptr.shape[0]
append_offset = indptr[-1]
new_shape = (orig_data_size + sparse_matrix.indptr.shape[0] - 1,)
indptr.resize(new_shape)
indptr[orig_data_size:] = (sparse_matrix.indptr[1:].astype(np.int64)
+ append_offset)
# indices
indices = self.h5py_group['indices']
orig_data_size = indices.shape[0]
new_shape = (orig_data_size + sparse_matrix.indices.shape[0],)
indices.resize(new_shape)
indices[orig_data_size:] = sparse_matrix.indices
# shape
self.shape = (
self.shape[0] + sparse_matrix.shape[0],
max(self.shape[1], sparse_matrix.shape[1]))
self.attrs['h5sparse_shape'] = self.shape
else:
raise NotImplementedError("The append method for format {} is not implemented."
.format(self.format_str))
| 40.790262 | 91 | 0.504637 |
453d0c7386ef4b5ccb7b6e695c794ad1d2ffb0e9 | 4,384 | py | Python | vancouver_film_school/vfs_r2_user_interfaces.py | TrevisanGMW/maya-scripts | 80840520924c8e6087cd6e3e1be6e4c5d663297b | [
"MIT"
] | 8 | 2020-01-03T03:13:32.000Z | 2021-11-06T05:09:44.000Z | vancouver_film_school/vfs_r2_user_interfaces.py | TrevisanGMW/maya-scripts | 80840520924c8e6087cd6e3e1be6e4c5d663297b | [
"MIT"
] | 3 | 2020-11-09T06:10:54.000Z | 2021-01-08T02:57:25.000Z | vancouver_film_school/vfs_r2_user_interfaces.py | TrevisanGMW/maya-scripts | 80840520924c8e6087cd6e3e1be6e4c5d663297b | [
"MIT"
] | 1 | 2020-09-18T00:06:40.000Z | 2020-09-18T00:06:40.000Z | '''
An example of how to generate a user interface using Python and Maya commands (cmds)
For more documentation go to "Maya > Help > Maya Scripting Reference > Python Command Reference" or visit the link below:
https://help.autodesk.com/view/MAYAUL/2020/ENU/index.html?contextId=COMMANDSPYTHON-INDEX
'''
import maya.cmds as cmds # Import Maya API
def window_name(): # Define a function for the window
if cmds.window("window_name", exists =True): # Check if the window exists
cmds.deleteUI("window_name")# if it does, delete it (so you don't have multiple windows with the same content)
# main dialog start here =================================================================================
window_name = cmds.window("window_name", title="My Window",\
titleBar=True,minimizeButton=False,maximizeButton=False, sizeable =True)# Create a window object (Unsure what the parameters do? Search for the documentation for cmds.window)
content_main = cmds.columnLayout(adj = True) # Create a column to populate with elements
cmds.separator(h=5, st="none" ) # Empty Space
# Create a text (these elements are all children of the columnLayout we created above, unless otherwise declared)
# How do you change the parent of a UI element? Use the "parent" parameter. For example cmds.text(parent='content_main_02')
my_text = cmds.text("This is an example of a text!") # Notice that the text is stored in a variable so we can reference it later
cmds.separator(h=10, st="none" ) # Empty Space
my_textfield = cmds.textField(placeholderText='This is a textfield') # Creates a textfield, and store it in a variable so we can reference it later
cmds.separator(h=10, st="none" ) # Empty Space
# Create a rowColumnLayout so we can have multiple elements in a row (this element is super helpful when adjusting your UI)
# columnWidth needs a list of tuples describing every column. For example [(1, 10)(2, 15)] would make change columns of the size 10 and 15
cmds.rowColumnLayout(numberOfColumns=3, columnWidth=[(1, 100), (2, 100),(3,10)], cs=[(1,10),(2,5),(3,5)])
cmds.button(l ="Create Cube", c=lambda x:create_standard_cube(), w=100, bgc=(.3,.7,.3)) # Create a button - (The lambda part of it is to make the button capable of calling other functions)
cmds.button(l ="Create Sphere", c=lambda x:create_standard_sphere(), w=100, bgc=(.3,.7,.3)) # Another button
cmds.separator(h=5, st="none" ) # Empty Space
cmds.rowColumnLayout( p=content_main, numberOfColumns=1, columnWidth=[(1, 205), (2, 100),(3,10)], cs=[(1,10),(2,5),(3,5)]) # Another rowColumnLayout, this time with only one Column
cmds.separator(h=5, st="none" ) # Empty Space
cmds.button(l ="Replace Text with Textfield", c=lambda x:replace_textfield(), w=100, bgc=(.3,.7,.3)) # Another button to update the text
cmds.separator(h=10, st="none" )# Empty Space
    # Functions for the buttons (They have been declared inside of the main function so they have access to the button variables)
def create_standard_sphere(): # A function to create a sphere (used by one of the buttons)
cmds.polySphere(name='mySphere') # Function to create a simple sphere named "mySphere"
def create_standard_cube(): # Same thing for a cube
cmds.polyCube(name='myCube') # Function to create a simple cube named "myCube"
def replace_textfield(): # Function to replace the text element with the provided text
        # Query the information from the text field and dump it into a variable. The "q=True" means query, "text=True" tells the function what to query.
        text_from_textfield = cmds.textField(my_textfield, q=True, text=True)
        # Update the text with the information that was extracted from the variable
# Instead of using "q=True", this time we used "e=True" which means "edit". "label=text_from_textfield" determines what is being edited.
cmds.text(my_text, e=True, label=text_from_textfield)
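    # The same query/edit pattern works for most UI elements. For instance (illustrative
    # only, "my_button" is a hypothetical variable holding a button):
    # cmds.button(my_button, q=True, label=True) would return that button's label, and
    # cmds.button(my_button, e=True, label='New Label') would change it.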
cmds.showWindow(window_name) # Finally show the window element we created at the beginning
# main dialog ends here =================================================================================
window_name() # Call the main function | 71.868852 | 201 | 0.668568 |
ff9f554655ea93347f3600a77607791d8ef9c24d | 4,049 | py | Python | Demo/GenerateData2DConstantDensity.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | Demo/GenerateData2DConstantDensity.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | Demo/GenerateData2DConstantDensity.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | 1 | 2020-06-13T07:13:07.000Z | 2020-06-13T07:13:07.000Z | import time
import copy
import numpy as np
import matplotlib.pyplot as plt
import math
import os
from shutil import copy2
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
import scipy.io as sio
from pysit import *
from pysit.gallery import horizontal_reflector
from pysit.util.io import *
from pysit.util.parallel import *
if __name__ == '__main__':
# Set up domain, mesh and velocity model
pmlx = PML(0.1, 1000)
pmlz = PML(0.1, 1000)
x_config = (0.0, 2.0, pmlx, pmlx)
z_config = (0.0, 1.0, pmlz, pmlz)
d = RectangularDomain(x_config, z_config)
m = CartesianMesh(d, 201, 101)
C, C0, m, d = horizontal_reflector(m)
# Set up shots
zmin = d.z.lbound
zmax = d.z.rbound
zpos = zmin + (1./10.)*zmax
Nshots = 1
shots = equispaced_acquisition(m,
RickerWavelet(10.0),
sources=Nshots,
source_depth=zpos,
source_kwargs={},
receivers='max',
receiver_depth=zpos,
receiver_kwargs={}
)
shots_freq = copy.deepcopy(shots)
# Define and configure the wave solver
trange = (0.0,2.0)
# Define the time-domain wave-equation solver and generate the time-domain data
solver = ConstantDensityAcousticWave(m,
spatial_accuracy_order=2,
trange=trange,
kernel_implementation='cpp')
base_model = solver.ModelParameters(m,{'C': C})
print('Generating time-domain data...')
tt = time.time()
generate_seismic_data(shots, solver, base_model)
print('Time-domain data generation: {0}s'.format(time.time()-tt))
# Check the result and plot the result
clim = C.min(),C.max()
plt.figure(figsize=(20,4))
plt.subplot(1,2,1)
vis.plot(C0, m, clim=clim)
plt.title(r'Initial Model of $v$')
plt.colorbar()
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.subplot(1,2,2)
vis.plot(C, m, clim=clim)
plt.title(r"True Model of $v$")
plt.colorbar()
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.show()
data = shots[0].receivers.data
t_smp = np.linspace(trange[0], trange[1], data.shape[0])
fig=plt.figure()
im1=plt.imshow(data, interpolation='nearest', aspect='auto', cmap='seismic', clim =[-.1,.1],
extent=[0.0, 2.0, t_smp[-1], 0.0])
plt.xlabel('Receivers [km]')
plt.ylabel('Time [s]')
plt.colorbar()
plt.show()
# Define the frequency-domain wave-equation solver and generate the frequency-domain data
solver = ConstantDensityHelmholtz(m,
spatial_accuracy_order=4)
frequencies = [2.0,3.0]
print('Generating frequency-domain data...')
tt = time.time()
generate_seismic_data(shots_freq, solver, base_model, frequencies=frequencies, petsc='mumps')
print('Frequency-domain data generation: {0}s'.format(time.time()-tt))
# Check the result and plot the result
xrec = np.linspace(0.0,2.0,201)
data1 = shots_freq[0].receivers.data_dft[2.0]
data2 = shots_freq[0].receivers.data_dft[3.0]
plt.figure(figsize=(12,12))
plt.subplot(2,2,1)
plt.plot(xrec, np.real(data1.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Real part of data at f = 2.0Hz')
plt.subplot(2,2,2)
plt.plot(xrec, np.real(data2.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Real part of data at f = 3.0Hz')
plt.subplot(2,2,3)
plt.plot(xrec, np.imag(data1.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Imaginary part of data at f = 2.0Hz')
plt.subplot(2,2,4)
plt.plot(xrec, np.imag(data2.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Imaginary part of data at f = 3.0Hz')
plt.show()
| 28.514085 | 97 | 0.579649 |
b9a4dec286ffd140855776b932524d69d666d26e | 269 | py | Python | terrascript/cronitor/__init__.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/cronitor/__init__.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/cronitor/__init__.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/cronitor/__init__.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class cronitor(terrascript.Provider):
pass
| 17.933333 | 79 | 0.773234 |
47132749e80b236baacfe51b6d27021d34274b9e | 597 | py | Python | setup.py | aktur/kodi-voice | 90a9c91a619ce4676eb919a2664a2150a38a407b | [
"MIT"
] | null | null | null | setup.py | aktur/kodi-voice | 90a9c91a619ce4676eb919a2664a2150a38a407b | [
"MIT"
] | null | null | null | setup.py | aktur/kodi-voice | 90a9c91a619ce4676eb919a2664a2150a38a407b | [
"MIT"
] | null | null | null | from setuptools import setup
from pip.req import parse_requirements
setup(
name = 'Kodi-Voice',
packages = ['kodi_voice'],
version = '1.1.0',
description = 'A library for interfacing with Kodi with VUI platforms like Amazon Alexa, Google Home, and Cortana.',
author = 'Joe Ipson',
author_email = 'joe@ipson.me',
url = 'https://github.com/m0ngr31/kodi-voice',
zip_safe = False,
include_package_data = True,
keywords = ['kodi', 'voice', 'alexa'],
classifiers = [],
install_requires = ['requests', 'boto3', 'pyocclient', 'ConfigParser', 'num2words', 'roman', 'fuzzywuzzy']
)
| 33.166667 | 118 | 0.688442 |
de8c357819964944175cf0c8eb2ff3e6d48ea947 | 3,436 | py | Python | Trakttv.bundle/Contents/Libraries/Shared/plex_metadata/agents/agent.py | disrupted/Trakttv.bundle | 24712216c71f3b22fd58cb5dd89dad5bb798ed60 | [
"RSA-MD"
] | 1,346 | 2015-01-01T14:52:24.000Z | 2022-03-28T12:50:48.000Z | Trakttv.bundle/Contents/Libraries/Shared/plex_metadata/agents/agent.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 474 | 2015-01-01T10:27:46.000Z | 2022-03-21T12:26:16.000Z | Trakttv.bundle/Contents/Libraries/Shared/plex_metadata/agents/agent.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 191 | 2015-01-02T18:27:22.000Z | 2022-03-29T10:49:48.000Z | from plex_metadata.core.helpers import try_convert
import plex.lib.six.moves.urllib_parse as urlparse
import logging
import re
DEFAULT_MEDIA = ['movie', 'show', 'season', 'episode']
log = logging.getLogger(__name__)
class Agent(object):
def __init__(self, media, service, regex=None, type=None, children=None, season=None):
self.media = media
self.service = service
self.regex = regex
self.type = type
self.children = children
self.season = season
#
# Compile
#
@classmethod
def compile(cls, entry, media=None):
# Construct `Agent`
return cls(
media=cls.get_media(entry, media),
service=entry.get('service'),
type=entry.get('type'),
# Compile regular expression
regex=cls.compile_pattern(entry.get('pattern')),
# Compile children
children=[
cls.compile(child, media)
for child in (entry.get('children') or [])
],
# Overrides
season=entry.get('season')
)
@staticmethod
def compile_pattern(pattern):
if pattern is None:
return None
try:
return re.compile(pattern, re.IGNORECASE)
except Exception as ex:
log.warn('Unable to compile regular expression: %r - %s', pattern, ex, exc_info=True)
return None
@staticmethod
def get_media(entry, media=None):
if entry.get('media') is None:
return DEFAULT_MEDIA
return entry.get('media') + (media or [])
#
# Fill
#
def fill(self, guid, uri, media=None):
# Validate media matches agent
if media is not None and media not in self.media:
return False
# Search children for match
if self.children:
# Iterate over children, checking if `guid` can be filled
for child in self.children:
if child.fill(guid, uri, media):
return True
# Parse netloc (media id)
if self.regex:
# Match `uri.netloc` against pattern
match = self.regex.match(uri.netloc)
if not match:
return False
id = ''.join(match.groups())
else:
id = uri.netloc
# Cast `id` to defined type
if self.type:
id = try_convert(id, self.type, id)
# Update `guid`
guid.service = self.service or guid.agent_id
guid.id = id
# Fill `guid` with extra details from URI
self.fill_path(guid, uri.path)
self.fill_query(guid, uri.query)
# Process overrides
if self.season is not None:
guid.season = self.season
return True
def fill_path(self, guid, path):
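        # Illustrative example (hypothetical GUID): for a URI whose path is "/2/5",
        # the fragments below yield season 2 and episode 5.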
# Split path into fragments
fragments = path.strip('/').split('/')
# Retrieve TV parameters
if 'season' in self.media and len(fragments) >= 1:
guid.season = try_convert(fragments[0], int)
if 'episode' in self.media and len(fragments) >= 2:
guid.episode = try_convert(fragments[1], int)
@staticmethod
def fill_query(guid, query):
# Parse query parameters
parameters = dict(urlparse.parse_qsl(query))
# Update `guid` with parameters
guid.language = parameters.get('lang')
| 26.030303 | 97 | 0.565192 |
dcaa95d7695fa997064618e116a728f0da188596 | 2,956 | py | Python | src/bandersnatch/tests/test_utils.py | sukhbeersingh/bandersnatch | 5dfab28c5e965ccaab9cebc5bd090185ff28f8ad | [
"AFL-3.0"
] | 2 | 2020-04-22T15:00:30.000Z | 2020-04-22T17:21:50.000Z | src/bandersnatch/tests/test_utils.py | DalavanCloud/bandersnatch | 88f4d574f8bf55e75c01895aca41d32666ec9f54 | [
"AFL-3.0"
] | 1 | 2021-06-02T02:47:02.000Z | 2021-06-02T02:47:02.000Z | src/bandersnatch/tests/test_utils.py | DalavanCloud/bandersnatch | 88f4d574f8bf55e75c01895aca41d32666ec9f54 | [
"AFL-3.0"
] | null | null | null | import os
import os.path
import re
from pathlib import Path
from tempfile import TemporaryDirectory, gettempdir
import pytest
from bandersnatch.utils import ( # isort:skip
convert_url_to_path,
hash,
recursive_find_files,
rewrite,
unlink_parent_dir,
user_agent,
)
def test_convert_url_to_path():
assert (
"packages/8f/1a/1aa000db9c5a799b676227e845d2b64fe725328e05e3d3b30036f"
+ "50eb316/peerme-1.0.0-py36-none-any.whl"
== convert_url_to_path(
"https://files.pythonhosted.org/packages/8f/1a/1aa000db9c5a799b67"
+ "6227e845d2b64fe725328e05e3d3b30036f50eb316/"
+ "peerme-1.0.0-py36-none-any.whl"
)
)
def test_hash():
sample = os.path.join(os.path.dirname(__file__), "sample")
assert hash(sample, function="md5") == "125765989403df246cecb48fa3e87ff8"
assert hash(sample, function="sha256") == (
"95c07c174663ebff531eed59b326ebb3fa95f418f680349fc33b07dfbcf29f18"
)
assert hash(sample) == (
"95c07c174663ebff531eed59b326ebb3fa95f418f680349fc33b07dfbcf29f18"
)
def test_find_files():
with TemporaryDirectory() as td:
td_path = Path(td)
td_sub_path = td_path / "aDir"
td_sub_path.mkdir()
expected_found_files = {td_path / "file1", td_sub_path / "file2"}
for afile in expected_found_files:
with afile.open("w") as afp:
afp.write("PyPA ftw!")
found_files = set()
recursive_find_files(found_files, td_path)
assert found_files == expected_found_files
def test_rewrite(tmpdir, monkeypatch):
monkeypatch.chdir(tmpdir)
with open("sample", "w") as f:
f.write("bsdf")
with rewrite("sample") as f:
f.write("csdf")
assert open("sample").read() == "csdf"
mode = os.stat("sample").st_mode
assert oct(mode) == "0o100644"
def test_rewrite_fails(tmpdir, monkeypatch):
monkeypatch.chdir(tmpdir)
with open("sample", "w") as f:
f.write("bsdf")
with pytest.raises(Exception):
with rewrite("sample") as f:
f.write("csdf")
raise Exception()
assert open("sample").read() == "bsdf"
def test_rewrite_nonexisting_file(tmpdir, monkeypatch):
monkeypatch.chdir(tmpdir)
with rewrite("sample", "w") as f:
f.write("csdf")
with open("sample", "r") as f:
assert f.read() == "csdf"
def test_unlink_parent_dir():
adir = Path(gettempdir()) / f"tb.{os.getpid()}"
adir.mkdir()
afile = adir / "file1"
afile.touch()
unlink_parent_dir(afile)
assert not adir.exists()
def test_user_agent():
assert re.match(
r"bandersnatch/[0-9]\.[0-9]\.[0-9]\.?d?e?v?[0-9]? \(.*\)", user_agent()
)
def test_user_agent_async():
async_ver = "aiohttp 0.6.9"
assert re.match(
fr"bandersnatch/[0-9]\.[0-9]\.[0-9]\.?d?e?v?[0-9]? \(.*\) \({async_ver}\)",
user_agent(async_ver),
)
| 27.119266 | 83 | 0.632273 |
e316b2016f22a50570b7e0f05f9705acd74c792d | 2,817 | py | Python | backend/api/main.py | jeano-star/Yacht | 0aed12e1dca5ef34c3526d5170063558a691ee92 | [
"MIT"
] | 1 | 2020-10-23T18:52:17.000Z | 2020-10-23T18:52:17.000Z | backend/api/main.py | ptTrR/Yacht | 396a59f7a1b25e96c52c33cc7b0986f2d8dedb1c | [
"MIT"
] | null | null | null | backend/api/main.py | ptTrR/Yacht | 396a59f7a1b25e96c52c33cc7b0986f2d8dedb1c | [
"MIT"
] | null | null | null | import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from .routers import apps, templates, app_settings
import uuid
from .db import models
from .db.database import SessionLocal, engine
from .routers.app_settings import read_template_variables, set_template_variables, SessionLocal
from sqlalchemy.orm import Session
from .settings import Settings
from .utils import get_db
from .auth import fastapi_users, cookie_authentication, database, users, user_create, UserDB, get_password_hash
app = FastAPI(root_path="/api")
models.Base.metadata.create_all(bind=engine)
settings = Settings()
app.include_router(
apps.router,
prefix="/apps",
tags=["apps"],
# dependencies=[Depends(get_token_header)],
responses={404: {"description": "Not found"}},
)
app.include_router(
fastapi_users.get_auth_router(cookie_authentication),
prefix="/auth",
tags=["auth"]
)
app.include_router(
fastapi_users.get_users_router(),
prefix="/users",
tags=["users"]
)
app.include_router(
templates.router,
prefix="/templates",
tags=["templates"],
# dependencies=[Depends(get_token_header)],
responses={404: {"description": "Not found"}},
)
app.include_router(
app_settings.router,
prefix="/settings",
tags=["settings"]
)
@app.on_event("startup")
async def startup():
await database.connect()
# Clear old db migrations
delete_alembic = "DROP TABLE IF EXISTS alembic_version;"
await database.execute(delete_alembic)
users_exist = await database.fetch_all(query=users.select())
if users_exist:
print("Users Exist")
else:
print("No Users. Creating the default user.")
# This is where I'm having trouble
hashed_password = get_password_hash(settings.ADMIN_PASSWORD)
base_user = UserDB(
id=uuid.uuid4(),
email=settings.ADMIN_EMAIL,
hashed_password=hashed_password,
is_active=True,
is_superuser=True
)
user_created = await user_create(base_user)
template_variables_exist = read_template_variables(SessionLocal())
if template_variables_exist:
print("Template Variables Exist")
else:
print("No Variables yet!")
t_vars = settings.BASE_TEMPLATE_VARIABLES
t_var_list = []
for t in t_vars:
template_variables = models.TemplateVariables(
variable=t.get("variable"),
replacement=t.get("replacement")
)
t_var_list.append(template_variables)
set_template_variables(new_variables=t_var_list, db=SessionLocal())
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
if __name__ == "__main__":
uvicorn.run("main:app", host='0.0.0.0', port=8000, reload=True)
| 29.041237 | 111 | 0.688676 |
e42d467516b6b158a0c184f4bc065d3a91dd36d1 | 1,288 | py | Python | blendergltf/exporters/camera.py | iamthad/blendergltf | a7d162b0db9b2a5a7bd5399badca7a43def8c7ab | [
"Apache-2.0"
] | 343 | 2016-03-09T22:39:15.000Z | 2022-01-31T03:13:28.000Z | blendergltf/exporters/camera.py | theirishduck/blendergltf | 9943d9afce10285396688a7359efea6032054fb3 | [
"Apache-2.0"
] | 149 | 2016-02-15T06:15:51.000Z | 2019-04-25T18:59:46.000Z | blendergltf/exporters/camera.py | theirishduck/blendergltf | 9943d9afce10285396688a7359efea6032054fb3 | [
"Apache-2.0"
] | 56 | 2016-02-13T06:24:10.000Z | 2021-02-07T01:49:43.000Z | from .base import BaseExporter
class CameraExporter(BaseExporter):
gltf_key = 'cameras'
blender_key = 'cameras'
@classmethod
def export(cls, state, blender_data):
camera_gltf = {}
if blender_data.type == 'ORTHO':
xmag = 0.5 * blender_data.ortho_scale
ymag = xmag * state['aspect_ratio']
camera_gltf = {
'orthographic': {
'xmag': ymag,
'ymag': xmag,
'zfar': blender_data.clip_end,
'znear': blender_data.clip_start,
},
'type': 'orthographic',
}
else:
angle_y = blender_data.angle_y if blender_data.angle_y != 0.0 else 1e-6
camera_gltf = {
'perspective': {
'aspectRatio': blender_data.angle_x / angle_y,
'yfov': angle_y,
'zfar': blender_data.clip_end,
'znear': blender_data.clip_start,
},
'type': 'perspective',
}
camera_gltf['name'] = blender_data.name
extras = cls.get_custom_properties(blender_data)
if extras:
camera_gltf['extras'] = extras
return camera_gltf
| 33.025641 | 83 | 0.497671 |
42dee4865cbda7616f3eaff73caf6f0ea36dba30 | 1,288 | py | Python | tests/test_wrapper.py | kurusugawa-computer/annowork-api-python-client-draft | 40ee4481f763bbff15f28a93f7e028f25a744dab | [
"MIT"
] | null | null | null | tests/test_wrapper.py | kurusugawa-computer/annowork-api-python-client-draft | 40ee4481f763bbff15f28a93f7e028f25a744dab | [
"MIT"
] | null | null | null | tests/test_wrapper.py | kurusugawa-computer/annowork-api-python-client-draft | 40ee4481f763bbff15f28a93f7e028f25a744dab | [
"MIT"
] | null | null | null | import configparser
import datetime
import os
import annoworkapi
os.chdir(os.path.dirname(os.path.abspath(__file__)) + "/../")
inifile = configparser.ConfigParser()
inifile.read("./pytest.ini", "UTF-8")
workspace_id = inifile["annowork"]["workspace_id"]
service = annoworkapi.build()
class TestActualWorkingTime:
jtc_tzinfo = datetime.timezone(datetime.timedelta(hours=9))
def test_get_actual_working_times_daily(self):
tmp = service.wrapper.get_actual_working_times_daily(
workspace_id, term_start_date="2021-11-15", term_end_date="2021-11-15", tzinfo=self.jtc_tzinfo
)
def test_get_actual_working_times_by_workspace_member_daily(self):
print(f"{workspace_id=}")
tmp = service.wrapper.get_actual_working_times_by_workspace_member_daily(
workspace_id,
"c566151e-f8bb-4f73-9c78-af40da1814ef",
term_start_date="2021-11-15",
term_end_date="2021-11-16",
tzinfo=self.jtc_tzinfo,
)
class TestSchedule:
def test_get_schedules_daily(self):
tmp = service.wrapper.get_schedules_daily(
workspace_id,
term_start="2021-11-15",
term_end="2021-11-15",
job_id="4ec6c1cf-84ed-4e28-8074-491dbce64599",
)
| 29.953488 | 106 | 0.681677 |
c85bea4abce7bdcaf9452585699167a1000c9317 | 5,083 | py | Python | openmdao.lib/src/openmdao/lib/drivers/doedriver.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | [
"Apache-2.0"
] | null | null | null | openmdao.lib/src/openmdao/lib/drivers/doedriver.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | [
"Apache-2.0"
] | null | null | null | openmdao.lib/src/openmdao/lib/drivers/doedriver.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | [
"Apache-2.0"
] | null | null | null | """
.. _`DOEdriver.py`:
``doedriver.py`` -- Driver that executes a Design of Experiments.
"""
import csv
# pylint: disable-msg=E0611,F0401
from openmdao.lib.datatypes.api import Bool, List, Slot, Float, Str
from openmdao.main.case import Case
from openmdao.main.interfaces import IDOEgenerator, ICaseFilter, implements, \
IHasParameters
from openmdao.lib.drivers.caseiterdriver import CaseIterDriverBase
from openmdao.util.decorators import add_delegate
from openmdao.main.hasparameters import HasParameters
@add_delegate(HasParameters)
class DOEdriver(CaseIterDriverBase):
""" Driver for Design of Experiments. """
implements(IHasParameters)
# pylint: disable-msg=E1101
DOEgenerator = Slot(IDOEgenerator, iotype='in', required=True,
desc='Iterator supplying normalized DOE values.')
record_doe = Bool(True, iotype='in',
desc='Record normalized DOE values to CSV file.')
doe_filename = Str('', iotype='in',
desc='Name of CSV file to record to'
' (default is <driver-name>.csv).')
case_outputs = List(Str, iotype='in',
desc='A list of outputs to be saved with each case.')
case_filter = Slot(ICaseFilter, iotype='in',
desc='Selects cases to be run.')
def execute(self):
"""Generate and evaluate cases."""
self._csv_file = None
try:
super(DOEdriver, self).execute()
finally:
if self._csv_file is not None:
self._csv_file.close()
def get_case_iterator(self):
"""Returns a new iterator over the Case set."""
return self._get_cases()
def _get_cases(self):
"""Generate each case."""
params = self.get_parameters().values()
self.DOEgenerator.num_parameters = len(params)
record_doe = self.record_doe
events = self.get_events()
outputs = self.case_outputs
case_filter = self.case_filter
if record_doe:
if not self.doe_filename:
self.doe_filename = '%s.csv' % self.name
self._csv_file = open(self.doe_filename, 'wb')
csv_writer = csv.writer(self._csv_file)
for i, row in enumerate(self.DOEgenerator):
if record_doe:
csv_writer.writerow(['%.16g' % val for val in row])
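            # Map each normalized DOE value in [0, 1] onto its parameter's [low, high] range.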
vals = [p.low+(p.high-p.low)*val for p,val in zip(params,row)]
case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
# now add events
for varname in events:
case.add_input(varname, True)
case.add_outputs(outputs)
if case_filter is None or case_filter.select(i, case):
yield case
if record_doe:
self._csv_file.close()
self._csv_file = None
@add_delegate(HasParameters)
class NeighborhoodDOEdriver(CaseIterDriverBase):
"""Driver for Design of Experiments within a specified neighborhood
around a point."""
# pylint: disable-msg=E1101
DOEgenerator = Slot(IDOEgenerator, iotype='in', required=True,
desc='Iterator supplying normalized DOE values.')
case_outputs = List(Str, iotype='in',
desc='A list of outputs to be saved with each case.')
alpha = Float(.3, low=.01, high =1.0, iotype='in',
desc='Multiplicative factor for neighborhood DOE Driver.')
beta = Float(.01, low=.001, high=1.0, iotype='in',
desc='Another factor for neighborhood DOE Driver.')
def get_case_iterator(self):
"""Returns a new iterator over the Case set."""
return self._get_cases()
def _get_cases(self):
params = self.get_parameters().values()
self.DOEgenerator.num_parameters = len(params)
M = []
P = []
for p in params:
temp = p.evaluate()
P.append(temp)
M.append((temp-p.low)/(p.high-p.low))
for row in list(self.DOEgenerator)+[tuple(M)]:
vals = []
for p,val,curval in zip(params, row, P):
delta_low = curval-p.low
k_low = 1.0/(1.0+(1-self.beta)*delta_low)
new_low = curval - self.alpha*k_low*delta_low#/(self.exec_count+1)
delta_high = p.high-curval
k_high = 1.0/(1.0+(1-self.beta)*delta_high)
new_high = curval + self.alpha*k_high*delta_high#/(self.exec_count+1)
newval = new_low+(new_high-new_low)*val
vals.append(newval)
case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
# now add events
for varname in self.get_events():
case.add_input(varname, True)
case.add_outputs(self.case_outputs)
yield case
| 34.578231 | 85 | 0.576038 |
1f89124a6b57ffe50ca88744506a3d3c701ee400 | 422 | py | Python | packages/python/plotly/plotly/validators/scatter3d/error_z/_visible.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scatter3d/error_z/_visible.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scatter3d/error_z/_visible.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="visible", parent_name="scatter3d.error_z", **kwargs
):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| 30.142857 | 78 | 0.656398 |
2ad327a0dc6b5044e76002c49ee07bcfd94635ae | 2,386 | py | Python | scripts/artifacts/knowCusage.py | mastenp/iLEAPP | ee40ef7505b36d0b9b04131f284a9d4d036514a5 | [
"MIT"
] | 1 | 2020-10-06T20:28:03.000Z | 2020-10-06T20:28:03.000Z | scripts/artifacts/knowCusage.py | mastenp/iLEAPP | ee40ef7505b36d0b9b04131f284a9d4d036514a5 | [
"MIT"
] | null | null | null | scripts/artifacts/knowCusage.py | mastenp/iLEAPP | ee40ef7505b36d0b9b04131f284a9d4d036514a5 | [
"MIT"
] | null | null | null | import glob
import os
import pathlib
import plistlib
import sqlite3
import json
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
def get_knowCusage(files_found, report_folder, seeker):
file_found = str(files_found[0])
db = sqlite3.connect(file_found)
cursor = db.cursor()
cursor.execute(
"""
SELECT
DATETIME(ZOBJECT.ZSTARTDATE+978307200,'UNIXEPOCH') AS "START",
DATETIME(ZOBJECT.ZENDDATE+978307200,'UNIXEPOCH') AS "END",
ZOBJECT.ZVALUESTRING AS "BUNDLE ID",
(ZOBJECT.ZENDDATE - ZOBJECT.ZSTARTDATE) AS "USAGE IN SECONDS",
(ZOBJECT.ZENDDATE - ZOBJECT.ZSTARTDATE)/60.00 AS "USAGE IN MINUTES",
ZSOURCE.ZDEVICEID AS "DEVICE ID (HARDWARE UUID)",
CASE ZOBJECT.ZSTARTDAYOFWEEK
WHEN "1" THEN "Sunday"
WHEN "2" THEN "Monday"
WHEN "3" THEN "Tuesday"
WHEN "4" THEN "Wednesday"
WHEN "5" THEN "Thursday"
WHEN "6" THEN "Friday"
WHEN "7" THEN "Saturday"
END "DAY OF WEEK",
ZOBJECT.ZSECONDSFROMGMT/3600 AS "GMT OFFSET",
DATETIME(ZOBJECT.ZCREATIONDATE+978307200,'UNIXEPOCH') AS "ENTRY CREATION",
ZOBJECT.ZUUID AS "UUID",
ZOBJECT.Z_PK AS "ZOBJECT TABLE ID"
FROM
ZOBJECT
LEFT JOIN
ZSTRUCTUREDMETADATA
ON ZOBJECT.ZSTRUCTUREDMETADATA = ZSTRUCTUREDMETADATA.Z_PK
LEFT JOIN
ZSOURCE
ON ZOBJECT.ZSOURCE = ZSOURCE.Z_PK
WHERE
ZSTREAMNAME = "/app/usage"
""")
all_rows = cursor.fetchall()
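    # Note on the query above: KnowledgeC stores Core Data timestamps as seconds
    # since 2001-01-01, so 978307200 is added to shift them onto the Unix epoch
    # before DATETIME(..., 'UNIXEPOCH') renders them as readable dates.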
usageentries = len(all_rows)
data_list = []
if usageentries > 0:
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10]))
description = ''
report = ArtifactHtmlReport('KnowledgeC App Usage')
report.start_artifact_report(report_folder, 'KnowledgeC App Usage', description)
report.add_script()
data_headers = ('Start','End','Bundle ID','Usage in Seconds','Usage in Minutes','Device ID','Day of the Week','GMT Offset','Entry Creation','UUID','Zobject Table ID' )
report.write_artifact_data_table(data_headers, data_list, file_found, html_escape=False)
report.end_artifact_report()
tsvname = 'KnowledgeC App Usage'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC App Usage'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data available in table') | 33.138889 | 174 | 0.715842 |
2925722b95da3e926b189c9672a5a9dac59b698d | 5,468 | py | Python | test/functional/wallet_listdescriptors.py | republic-productions/finalcoin | 7c0f335ded1e5c662034c822ca2c474b8e62778f | [
"MIT"
] | null | null | null | test/functional/wallet_listdescriptors.py | republic-productions/finalcoin | 7c0f335ded1e5c662034c822ca2c474b8e62778f | [
"MIT"
] | null | null | null | test/functional/wallet_listdescriptors.py | republic-productions/finalcoin | 7c0f335ded1e5c662034c822ca2c474b8e62778f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listdescriptors RPC."""
from test_framework.descriptors import (
descsum_create
)
from test_framework.test_framework import FinalcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class ListDescriptorsTest(FinalcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
# do not create any wallet by default
def init_wallet(self, i):
return
def run_test(self):
node = self.nodes[0]
assert_raises_rpc_error(-18, 'No wallet is loaded.', node.listdescriptors)
if self.is_bdb_compiled():
self.log.info('Test that the command is not available for legacy wallets.')
node.createwallet(wallet_name='w1', descriptors=False)
assert_raises_rpc_error(-4, 'listdescriptors is not available for non-descriptor wallets', node.listdescriptors)
self.log.info('Test the command for empty descriptors wallet.')
node.createwallet(wallet_name='w2', blank=True, descriptors=True)
assert_equal(0, len(node.get_wallet_rpc('w2').listdescriptors()['descriptors']))
self.log.info('Test the command for a default descriptors wallet.')
node.createwallet(wallet_name='w3', descriptors=True)
result = node.get_wallet_rpc('w3').listdescriptors()
assert_equal("w3", result['wallet_name'])
assert_equal(6, len(result['descriptors']))
assert_equal(6, len([d for d in result['descriptors'] if d['active']]))
assert_equal(3, len([d for d in result['descriptors'] if d['internal']]))
for item in result['descriptors']:
assert item['desc'] != ''
assert item['next'] == 0
assert item['range'] == [0, 0]
assert item['timestamp'] is not None
self.log.info('Test descriptors with hardened derivations are listed in importable form.')
xprv = 'tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg'
xpub_acc = 'tpubDCMVLhErorrAGfApiJSJzEKwqeaf2z3NrkVMxgYQjZLzMjXMBeRw2muGNYbvaekAE8rUFLftyEar4LdrG2wXyyTJQZ26zptmeTEjPTaATts'
hardened_path = '/84\'/1\'/0\''
wallet = node.get_wallet_rpc('w2')
wallet.importdescriptors([{
'desc': descsum_create('wpkh(' + xprv + hardened_path + '/0/*)'),
'timestamp': 1296688602,
}])
expected = {
'wallet_name': 'w2',
'descriptors': [
{'desc': descsum_create('wpkh([80002067' + hardened_path + ']' + xpub_acc + '/0/*)'),
'timestamp': 1296688602,
'active': False,
'range': [0, 0],
'next': 0},
],
}
assert_equal(expected, wallet.listdescriptors())
assert_equal(expected, wallet.listdescriptors(False))
self.log.info('Test list private descriptors')
expected_private = {
'wallet_name': 'w2',
'descriptors': [
{'desc': descsum_create('wpkh(' + xprv + hardened_path + '/0/*)'),
'timestamp': 1296688602,
'active': False,
'range': [0, 0],
'next': 0},
],
}
assert_equal(expected_private, wallet.listdescriptors(True))
self.log.info("Test listdescriptors with encrypted wallet")
wallet.encryptwallet("pass")
assert_equal(expected, wallet.listdescriptors())
self.log.info('Test list private descriptors with encrypted wallet')
assert_raises_rpc_error(-13, 'Please enter the wallet passphrase with walletpassphrase first.', wallet.listdescriptors, True)
wallet.walletpassphrase(passphrase="pass", timeout=1000000)
assert_equal(expected_private, wallet.listdescriptors(True))
self.log.info('Test list private descriptors with watch-only wallet')
node.createwallet(wallet_name='watch-only', descriptors=True, disable_private_keys=True)
watch_only_wallet = node.get_wallet_rpc('watch-only')
watch_only_wallet.importdescriptors([{
'desc': descsum_create('wpkh(' + xpub_acc + ')'),
'timestamp': 1296688602,
}])
assert_raises_rpc_error(-4, 'Can\'t get descriptor string', watch_only_wallet.listdescriptors, True)
self.log.info('Test non-active non-range combo descriptor')
node.createwallet(wallet_name='w4', blank=True, descriptors=True)
wallet = node.get_wallet_rpc('w4')
wallet.importdescriptors([{
'desc': descsum_create('combo(' + node.get_deterministic_priv_key().key + ')'),
'timestamp': 1296688602,
}])
expected = {
'wallet_name': 'w4',
'descriptors': [
{'active': False,
'desc': 'combo(0227d85ba011276cf25b51df6a188b75e604b38770a462b2d0e9fb2fc839ef5d3f)#np574htj',
'timestamp': 1296688602},
]
}
assert_equal(expected, wallet.listdescriptors())
if __name__ == '__main__':
ListDescriptorsTest().main()
| 42.71875 | 133 | 0.636613 |
6b9628f0a49ee9a98dbcc5d4010c5b3063134bfb | 2,071 | py | Python | rlpyt/ul/experiments/rl_from_ul/configs/dmlab_ppo_from_ul.py | traffic-lights/rlpyt | ec4689cddd55d98c037194685cfd6ca8e6785014 | [
"MIT"
] | 2,122 | 2019-07-02T13:19:10.000Z | 2022-03-22T09:59:42.000Z | rlpyt/ul/experiments/rl_from_ul/configs/dmlab_ppo_from_ul.py | traffic-lights/rlpyt | ec4689cddd55d98c037194685cfd6ca8e6785014 | [
"MIT"
] | 206 | 2019-07-02T14:19:42.000Z | 2022-02-15T02:34:28.000Z | rlpyt/ul/experiments/rl_from_ul/configs/dmlab_ppo_from_ul.py | traffic-lights/rlpyt | ec4689cddd55d98c037194685cfd6ca8e6785014 | [
"MIT"
] | 369 | 2019-07-02T13:38:28.000Z | 2022-03-28T11:16:39.000Z |
import copy
configs = dict()
config = dict(
agent=dict(
state_dict_filename=None,
load_conv=True,
load_all=False, # Just for replay saving.
),
algo=dict(
discount=0.99,
learning_rate=2.5e-4,
value_loss_coeff=0.5,
entropy_loss_coeff=0.01, # LEVEL-SPECIFIC
clip_grad_norm=100.,
initial_optim_state_dict=None,
gae_lambda=0.97,
minibatches=2,
epochs=1,
ratio_clip=0.1,
linear_lr_schedule=False,
normalize_advantage=False,
),
env=dict(
level="lasertag_one_opponent_small",
frame_history=1,
fps=None,
),
# Will use same args for eval env.
model=dict(
# use_fourth_layer=True,
skip_connections=True,
lstm_size=256,
hidden_sizes=None,
kiaming_init=True,
skip_lstm=True,
stop_conv_grad=False,
),
optim=dict(),
runner=dict(
n_steps=25e6,
log_interval_steps=1e5,
),
sampler=dict(
batch_T=128,
batch_B=16,
max_decorrelation_steps=3000,
),
pretrain=dict( # Just for logging purposes.
name=None,
algo=None,
n_updates=None,
log_interval_updates=None,
learning_rate=None,
target_update_tau=None,
batch_B=None,
batch_T=None,
warmup_T=None,
delta_T=None,
hidden_sizes=None,
latent_size=None,
batch_size=None,
validation_batch_size=None,
activation_loss_coefficient=None,
replay=None,
model_dir=None,
learning_rate_anneal=None,
learning_rate_warmup=None,
weight_decay=None,
anchor_hidden_sizes=None,
action_condition=False,
transform_hidden_sizes=None,
kiaming_init=True,
data_aug=None,
random_shift_prob=None,
use_global_global=None,
use_global_local=None,
use_local_local=None,
local_conv_layer=None,
),
)
configs["ppo_16env"] = config
| 23.804598 | 50 | 0.593433 |
c231bbef0ae99322dd562abf1707ed2f406258b5 | 2,541 | py | Python | src/lgr_advanced/lgr_editor/forms/importer.py | icann/lgr-django | 1d0e272e0273ea2f0fda07396e86450e95dfe255 | [
"BSD-3-Clause"
] | 1 | 2018-09-19T11:03:11.000Z | 2018-09-19T11:03:11.000Z | src/lgr_advanced/lgr_editor/forms/importer.py | icann/lgr-django | 1d0e272e0273ea2f0fda07396e86450e95dfe255 | [
"BSD-3-Clause"
] | 15 | 2017-06-29T14:05:01.000Z | 2021-09-22T19:56:23.000Z | src/lgr_advanced/lgr_editor/forms/importer.py | icann/lgr-django | 1d0e272e0273ea2f0fda07396e86450e95dfe255 | [
"BSD-3-Clause"
] | 7 | 2017-06-14T17:59:19.000Z | 2019-08-09T03:16:03.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.core import validators
from django.utils.translation import ugettext_lazy as _
from .fields import (DEFAULT_UNICODE_VERSION,
VALIDATING_REPERTOIRES,
DEFAULT_VALIDATING_REPERTOIRE,
FILE_FIELD_ENCODING_HELP)
class CreateLGRForm(forms.Form):
name = forms.CharField(label=_("Name"))
validating_repertoire = forms.ChoiceField(label=_("Validating repertoire"),
help_text=_('Code points will be limited to the selected repertoire'),
required=False,
choices=(('', ''),) + VALIDATING_REPERTOIRES,
initial=DEFAULT_VALIDATING_REPERTOIRE)
unicode_version = forms.CharField(widget=forms.HiddenInput(),
initial=DEFAULT_UNICODE_VERSION)
class ImportLGRForm(forms.Form):
file = forms.FileField(label=_("Select file(s)"), required=True,
help_text=f"{_('If you select more than one file, this will create a LGR set.')} "
f"{FILE_FIELD_ENCODING_HELP}",
widget=forms.ClearableFileInput(attrs={'multiple': True}))
validating_repertoire = forms.ChoiceField(label=_("Validating repertoire"),
help_text=_('Code points will be limited to the selected repertoire'),
required=False,
choices=(('', ''),) + VALIDATING_REPERTOIRES,
initial=DEFAULT_VALIDATING_REPERTOIRE)
set_name = forms.CharField(label=_("LGR set name"),
required=False,
# TODO should catch that to get a valid LGR name
validators=[
validators.RegexValidator(r'^[\w\_\-\.]+$',
_('Enter a valid LGR set name. '
'This value may contain only letters, numbers '
'and ./-/_ characters.'), 'invalid'),
],
help_text=_('The name of the set'))
| 56.466667 | 116 | 0.480913 |
25ff0b5310a13ad25a95860fdcc47496b5b4365d | 2,575 | py | Python | opencv_tutorial/opencv_python_tutorials/Image_Processing/fourier_transfom.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | opencv_tutorial/opencv_python_tutorials/Image_Processing/fourier_transfom.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | opencv_tutorial/opencv_python_tutorials/Image_Processing/fourier_transfom.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 11:31:42 2019
@author: jone
"""
#%% Fourier Transform in Numpy
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('img/messi.jpg', 0)
f = np.fft.fft2(img)
# Shift the low-frequency region from the top-left corner to the center
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
plt.figure(figsize=(8,6))
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
"""
Working in the frequency domain
"""
rows, cols = img.shape
crow, ccol = int(rows/2), int(cols/2) # center coordinates of the image
d = 30
fshift[crow-d:crow+d, ccol-d:ccol+d] = 0
# Convert the Fourier-transform result back into an image
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.figure(figsize=(12, 6))
plt.subplot(131), plt.imshow(img, cmap='gray'), plt.title('Input Image'), plt.axis('off')
plt.subplot(132), plt.imshow(img_back, cmap='gray'), plt.title('Image after HPF'), plt.axis('off')
plt.subplot(133), plt.imshow(img_back), plt.title('Result in JET'), plt.axis('off')
plt.show()
#%% Fourier Transform in OpenCV
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('img/messi.jpg', 0)
dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))
plt.figure(figsize=(8,6))
plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('Input Image'), plt.axis('off')
plt.subplot(122), plt.imshow(magnitude_spectrum, cmap='gray'), plt.title('Magnitude Spectrum'), plt.axis('off')
plt.show()
"""
Removing the high-frequency region -> blur effect
"""
rows, cols = img.shape
crow, ccol = int(rows/2), int(cols/2)
# create a mask first, center square is 1, remaining all zeros
# Below, a square of size d is created and everything outside it is discarded,
# i.e. the high-frequency region is removed.
# The smaller d is, the smaller the square and the more of the outer (high-frequency)
# region is removed, so the image becomes more blurred.
# The larger d is, the bigger the square and the less of the outer (high-frequency)
# region is removed, so the result stays closer to the original image.
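# Rough illustration of the trade-off (indicative values, not from the original):
#   d = 10  -> tiny mask, almost all high frequencies removed, heavily blurred
#   d = 30  -> moderate blur (the value used below)
#   d = 100 -> most frequencies kept, result close to the original image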
d = 30
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow-d:crow+d, ccol-d:ccol+d] = 1
# apply mask and inverse DFT
fshift = dft_shift*mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0], img_back[:,:,1])
plt.figure(figsize=(12, 6))
plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('Input Image'), plt.axis('off')
plt.subplot(122), plt.imshow(img_back, cmap='gray'), plt.title('Magnitude Spectrum'), plt.axis('off')
plt.show() | 29.597701 | 111 | 0.690874 |
10127e10d2bf41b6cae2085f36c474d77e6e9fb3 | 136,431 | py | Python | google/cloud/bigquery/client.py | taz002dev/googleapis-python-bigquery | f95f415d3441b3928f6cc705cb8a75603d790fd6 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigquery/client.py | taz002dev/googleapis-python-bigquery | f95f415d3441b3928f6cc705cb8a75603d790fd6 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigquery/client.py | taz002dev/googleapis-python-bigquery | f95f415d3441b3928f6cc705cb8a75603d790fd6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google BigQuery API."""
from __future__ import absolute_import
from __future__ import division
from collections import abc as collections_abc
import copy
import functools
import gzip
import io
import itertools
import json
import math
import os
import tempfile
import uuid
import warnings
try:
import pyarrow
except ImportError: # pragma: NO COVER
pyarrow = None
from google import resumable_media
from google.resumable_media.requests import MultipartUpload
from google.resumable_media.requests import ResumableUpload
import google.api_core.client_options
import google.api_core.exceptions
from google.api_core.iam import Policy
from google.api_core import page_iterator
import google.cloud._helpers
from google.cloud import exceptions
from google.cloud.client import ClientWithProject
from google.cloud.bigquery._helpers import _del_sub_prop
from google.cloud.bigquery._helpers import _get_sub_prop
from google.cloud.bigquery._helpers import _record_field_to_json
from google.cloud.bigquery._helpers import _str_or_none
from google.cloud.bigquery._helpers import _verify_job_config_type
from google.cloud.bigquery._http import Connection
from google.cloud.bigquery import _pandas_helpers
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetListItem
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.opentelemetry_tracing import create_span
from google.cloud.bigquery import job
from google.cloud.bigquery.model import Model
from google.cloud.bigquery.model import ModelReference
from google.cloud.bigquery.model import _model_arg_to_model_ref
from google.cloud.bigquery.query import _QueryResults
from google.cloud.bigquery.retry import DEFAULT_RETRY
from google.cloud.bigquery.routine import Routine
from google.cloud.bigquery.routine import RoutineReference
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import _table_arg_to_table
from google.cloud.bigquery.table import _table_arg_to_table_ref
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import TableListItem
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery.table import RowIterator
_DEFAULT_CHUNKSIZE = 1048576 # 1024 * 1024 B = 1 MB
_MAX_MULTIPART_SIZE = 5 * 1024 * 1024
_DEFAULT_NUM_RETRIES = 6
_BASE_UPLOAD_TEMPLATE = "{host}/upload/bigquery/v2/projects/{project}/jobs?uploadType="
_MULTIPART_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + "multipart"
_RESUMABLE_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + "resumable"
_GENERIC_CONTENT_TYPE = "*/*"
_READ_LESS_THAN_SIZE = (
"Size {:d} was specified but the file-like object only had " "{:d} bytes remaining."
)
_NEED_TABLE_ARGUMENT = (
"The table argument should be a table ID string, Table, or TableReference"
)
_LIST_ROWS_FROM_QUERY_RESULTS_FIELDS = "jobReference,totalRows,pageToken,rows"
# In microbenchmarks, it's been shown that even in ideal conditions (query
# finished, local data), requests to getQueryResults can take 10+ seconds.
# In less-than-ideal situations, the response can take even longer, as it must
# be able to download a full 100+ MB row in that time. Don't let the
# connection timeout before data can be downloaded.
# https://github.com/googleapis/python-bigquery/issues/438
_MIN_GET_QUERY_RESULTS_TIMEOUT = 120
class Project(object):
"""Wrapper for resource describing a BigQuery project.
Args:
project_id (str): Opaque ID of the project
numeric_id (int): Numeric ID of the project
friendly_name (str): Display name of the project
"""
def __init__(self, project_id, numeric_id, friendly_name):
self.project_id = project_id
self.numeric_id = numeric_id
self.friendly_name = friendly_name
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct an instance from a resource dict."""
return cls(resource["id"], resource["numericId"], resource["friendlyName"])
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
Args:
project (Optional[str]):
Project ID for the project which the client acts on behalf of.
Will be passed when creating a dataset / job. If not passed,
falls back to the default inferred from the environment.
credentials (Optional[google.auth.credentials.Credentials]):
The OAuth2 Credentials to use for this client. If not passed
(and if no ``_http`` object is passed), falls back to the
default inferred from the environment.
_http (Optional[requests.Session]):
HTTP object to make requests. Can be any object that
defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an ``_http``
object is created that is bound to the ``credentials`` for the
current object.
This parameter should be considered private, and could change in
the future.
location (Optional[str]):
Default location for jobs / datasets / tables.
default_query_job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
Default ``QueryJobConfig``.
Will be merged into job configs passed into the ``query`` method.
client_info (Optional[google.api_core.client_info.ClientInfo]):
The client info used to send a user-agent string along with API
requests. If ``None``, then default info will be used. Generally,
you only need to set this if you're developing your own library
or partner tool.
client_options (Optional[Union[google.api_core.client_options.ClientOptions, Dict]]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
Raises:
google.auth.exceptions.DefaultCredentialsError:
Raised if ``credentials`` is not specified and the library fails
to acquire default credentials.
"""
SCOPE = (
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
)
"""The scopes required for authenticating as a BigQuery consumer."""
def __init__(
self,
project=None,
credentials=None,
_http=None,
location=None,
default_query_job_config=None,
client_info=None,
client_options=None,
):
super(Client, self).__init__(
project=project,
credentials=credentials,
client_options=client_options,
_http=_http,
)
kw_args = {"client_info": client_info}
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
kw_args["api_endpoint"] = api_endpoint
self._connection = Connection(self, **kw_args)
self._location = location
self._default_query_job_config = copy.deepcopy(default_query_job_config)
@property
def location(self):
"""Default location for jobs / datasets / tables."""
return self._location
def close(self):
"""Close the underlying transport objects, releasing system resources.
.. note::
The client instance can be used for making additional requests even
after closing, in which case the underlying connections are
automatically re-created.
"""
self._http._auth_request.session.close()
self._http.close()
def get_service_account_email(
self, project=None, retry=DEFAULT_RETRY, timeout=None
):
"""Get the email address of the project's BigQuery service account
Note:
This is the service account that BigQuery uses to manage tables
encrypted by a key in KMS.
Args:
project (Optional[str]):
                Project ID to use for retrieving service account email.
Defaults to the client's project.
retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
str: service account email address
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> client.get_service_account_email()
my_service_account@my-project.iam.gserviceaccount.com
"""
if project is None:
project = self.project
path = "/projects/%s/serviceAccount" % (project,)
span_attributes = {"path": path}
api_response = self._call_api(
retry,
span_name="BigQuery.getServiceAccountEmail",
span_attributes=span_attributes,
method="GET",
path=path,
timeout=timeout,
)
return api_response["email"]
def list_projects(
self, max_results=None, page_token=None, retry=DEFAULT_RETRY, timeout=None
):
"""List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
Args:
max_results (Optional[int]):
Maximum number of projects to return, If not passed,
defaults to a value set by the API.
page_token (Optional[str]):
Token representing a cursor into the projects. If not passed,
the API will return the first page of projects. The token marks
the beginning of the iterator to be returned and the value of
the ``page_token`` can be accessed at ``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
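        Example:
            Illustrative sketch (assumes application default credentials):
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> for project in client.list_projects():
            ...     print(project.project_id)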
"""
span_attributes = {"path": "/projects"}
def api_request(*args, **kwargs):
return self._call_api(
retry,
span_name="BigQuery.listProjects",
span_attributes=span_attributes,
*args,
timeout=timeout,
**kwargs,
)
return page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path="/projects",
item_to_value=_item_to_project,
items_key="projects",
page_token=page_token,
max_results=max_results,
)
def list_datasets(
self,
project=None,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (Optional[str]):
                Project ID to use for retrieving datasets. Defaults to the
client's project.
include_all (Optional[bool]):
True if results include hidden datasets. Defaults to False.
filter (Optional[str]):
An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#body.QUERY_PARAMETERS.filter
max_results (Optional[int]):
Maximum number of datasets to return.
page_token (Optional[str]):
Token representing a cursor into the datasets. If not passed,
the API will return the first page of datasets. The token marks
the beginning of the iterator to be returned and the value of
the ``page_token`` can be accessed at ``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.api_core.page_iterator.Iterator:
                Iterator of :class:`~google.cloud.bigquery.dataset.DatasetListItem`
                associated with the project.
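        Example:
            Illustrative sketch (assumes application default credentials):
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> for dataset in client.list_datasets():
            ...     print(dataset.dataset_id)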
"""
extra_params = {}
if project is None:
project = self.project
if include_all:
extra_params["all"] = True
if filter:
# TODO: consider supporting a dict of label -> value for filter,
# and converting it into a string here.
extra_params["filter"] = filter
path = "/projects/%s/datasets" % (project,)
span_attributes = {"path": path}
def api_request(*args, **kwargs):
return self._call_api(
retry,
span_name="BigQuery.listDatasets",
span_attributes=span_attributes,
*args,
timeout=timeout,
**kwargs,
)
return page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path=path,
item_to_value=_item_to_dataset,
items_key="datasets",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def dataset(self, dataset_id, project=None):
"""Deprecated: Construct a reference to a dataset.
.. deprecated:: 1.24.0
Construct a
:class:`~google.cloud.bigquery.dataset.DatasetReference` using its
constructor or use a string where previously a reference object
was used.
As of ``google-cloud-bigquery`` version 1.7.0, all client methods
that take a
:class:`~google.cloud.bigquery.dataset.DatasetReference` or
:class:`~google.cloud.bigquery.table.TableReference` also take a
string in standard SQL format, e.g. ``project.dataset_id`` or
``project.dataset_id.table_id``.
Args:
dataset_id (str): ID of the dataset.
project (Optional[str]):
Project ID for the dataset (defaults to the project of the client).
Returns:
google.cloud.bigquery.dataset.DatasetReference:
a new ``DatasetReference`` instance.
"""
if project is None:
project = self.project
warnings.warn(
"Client.dataset is deprecated and will be removed in a future version. "
"Use a string like 'my_project.my_dataset' or a "
"cloud.google.bigquery.DatasetReference object, instead.",
PendingDeprecationWarning,
stacklevel=2,
)
return DatasetReference(project, dataset_id)
def _create_bqstorage_client(self):
"""Create a BigQuery Storage API client using this client's credentials.
If a client cannot be created due to missing dependencies, raise a
warning and return ``None``.
Returns:
Optional[google.cloud.bigquery_storage.BigQueryReadClient]:
A BigQuery Storage API client.
"""
try:
from google.cloud import bigquery_storage
except ImportError:
warnings.warn(
"Cannot create BigQuery Storage client, the dependency "
"google-cloud-bigquery-storage is not installed."
)
return None
return bigquery_storage.BigQueryReadClient(credentials=self._credentials)
def _dataset_from_arg(self, dataset):
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
if isinstance(dataset, DatasetListItem):
dataset = dataset.reference
else:
raise TypeError(
"dataset must be a Dataset, DatasetReference, DatasetListItem,"
" or string"
)
return dataset
def create_dataset(
self, dataset, exists_ok=False, retry=DEFAULT_RETRY, timeout=None
):
"""API call: create the dataset via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
Args:
dataset (Union[ \
google.cloud.bigquery.dataset.Dataset, \
google.cloud.bigquery.dataset.DatasetReference, \
google.cloud.bigquery.dataset.DatasetListItem, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
exists_ok (Optional[bool]):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the dataset.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Raises:
google.cloud.exceptions.Conflict:
If the dataset already exists.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset('my_project.my_dataset')
>>> dataset = client.create_dataset(dataset)
"""
dataset = self._dataset_from_arg(dataset)
if isinstance(dataset, DatasetReference):
dataset = Dataset(dataset)
path = "/projects/%s/datasets" % (dataset.project,)
data = dataset.to_api_repr()
if data.get("location") is None and self.location is not None:
data["location"] = self.location
try:
span_attributes = {"path": path}
api_response = self._call_api(
retry,
span_name="BigQuery.createDataset",
span_attributes=span_attributes,
method="POST",
path=path,
data=data,
timeout=timeout,
)
return Dataset.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_dataset(dataset.reference, retry=retry)
def create_routine(
self, routine, exists_ok=False, retry=DEFAULT_RETRY, timeout=None
):
"""[Beta] Create a routine via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/insert
Args:
routine (google.cloud.bigquery.routine.Routine):
A :class:`~google.cloud.bigquery.routine.Routine` to create.
The dataset that the routine belongs to must already exist.
exists_ok (Optional[bool]):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the routine.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.routine.Routine:
A new ``Routine`` returned from the service.
Raises:
google.cloud.exceptions.Conflict:
If the routine already exists.
"""
reference = routine.reference
path = "/projects/{}/datasets/{}/routines".format(
reference.project, reference.dataset_id
)
resource = routine.to_api_repr()
try:
span_attributes = {"path": path}
api_response = self._call_api(
retry,
span_name="BigQuery.createRoutine",
span_attributes=span_attributes,
method="POST",
path=path,
data=resource,
timeout=timeout,
)
return Routine.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_routine(routine.reference, retry=retry)
def create_table(self, table, exists_ok=False, retry=DEFAULT_RETRY, timeout=None):
"""API call: create a table via a PUT request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
table (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
A :class:`~google.cloud.bigquery.table.Table` to create.
If ``table`` is a reference, an empty table is created
with the specified ID. The dataset that the table belongs to
must already exist.
exists_ok (Optional[bool]):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the table.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.table.Table:
A new ``Table`` returned from the service.
Raises:
google.cloud.exceptions.Conflict:
If the table already exists.
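        Example:
            Illustrative sketch (the table ID and schema below are placeholders):
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> table = bigquery.Table(
            ...     "my_project.my_dataset.my_table",
            ...     schema=[bigquery.SchemaField("full_name", "STRING")],
            ... )
            >>> table = client.create_table(table)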
"""
table = _table_arg_to_table(table, default_project=self.project)
dataset_id = table.dataset_id
path = "/projects/%s/datasets/%s/tables" % (table.project, dataset_id)
data = table.to_api_repr()
try:
span_attributes = {"path": path, "dataset_id": dataset_id}
api_response = self._call_api(
retry,
span_name="BigQuery.createTable",
span_attributes=span_attributes,
method="POST",
path=path,
data=data,
timeout=timeout,
)
return Table.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_table(table.reference, retry=retry)
def _call_api(
self, retry, span_name=None, span_attributes=None, job_ref=None, **kwargs
):
call = functools.partial(self._connection.api_request, **kwargs)
if retry:
call = retry(call)
if span_name is not None:
with create_span(
name=span_name, attributes=span_attributes, client=self, job_ref=job_ref
):
return call()
return call()
def get_dataset(self, dataset_ref, retry=DEFAULT_RETRY, timeout=None):
"""Fetch the dataset referenced by ``dataset_ref``
Args:
dataset_ref (Union[ \
google.cloud.bigquery.dataset.DatasetReference, \
str, \
]):
A reference to the dataset to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
dataset reference from a string using
:func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.dataset.Dataset:
A ``Dataset`` instance.
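        Example:
            Illustrative sketch (the dataset ID below is a placeholder):
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> dataset = client.get_dataset("my_project.my_dataset")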
"""
if isinstance(dataset_ref, str):
dataset_ref = DatasetReference.from_string(
dataset_ref, default_project=self.project
)
path = dataset_ref.path
span_attributes = {"path": path}
api_response = self._call_api(
retry,
span_name="BigQuery.getDataset",
span_attributes=span_attributes,
method="GET",
path=path,
timeout=timeout,
)
return Dataset.from_api_repr(api_response)
def get_iam_policy(
self, table, requested_policy_version=1, retry=DEFAULT_RETRY, timeout=None,
):
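        """Return the access control policy for a table resource.
        Only IAM policy version 1 is supported; the policy is fetched with the
        ``tables.getIamPolicy`` API method and returned as a
        :class:`google.api_core.iam.Policy`.
        """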
if not isinstance(table, (Table, TableReference)):
raise TypeError("table must be a Table or TableReference")
if requested_policy_version != 1:
raise ValueError("only IAM policy version 1 is supported")
body = {"options": {"requestedPolicyVersion": 1}}
path = "{}:getIamPolicy".format(table.path)
span_attributes = {"path": path}
response = self._call_api(
retry,
span_name="BigQuery.getIamPolicy",
span_attributes=span_attributes,
method="POST",
path=path,
data=body,
timeout=timeout,
)
return Policy.from_api_repr(response)
def set_iam_policy(
self, table, policy, updateMask=None, retry=DEFAULT_RETRY, timeout=None,
):
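        """Set the access control policy for a table resource.
        ``policy`` must be a :class:`google.api_core.iam.Policy`; an optional
        ``updateMask`` limits which policy fields are changed. The policy
        returned by the ``tables.setIamPolicy`` API method is returned.
        """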
if not isinstance(table, (Table, TableReference)):
raise TypeError("table must be a Table or TableReference")
if not isinstance(policy, (Policy)):
raise TypeError("policy must be a Policy")
body = {"policy": policy.to_api_repr()}
if updateMask is not None:
body["updateMask"] = updateMask
path = "{}:setIamPolicy".format(table.path)
span_attributes = {"path": path}
response = self._call_api(
retry,
span_name="BigQuery.setIamPolicy",
span_attributes=span_attributes,
method="POST",
path=path,
data=body,
timeout=timeout,
)
return Policy.from_api_repr(response)
def test_iam_permissions(
self, table, permissions, retry=DEFAULT_RETRY, timeout=None,
):
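        """Return which of the given ``permissions`` the caller has on the table.
        Calls the ``tables.testIamPermissions`` API method and returns the raw
        response dictionary.
        """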
if not isinstance(table, (Table, TableReference)):
raise TypeError("table must be a Table or TableReference")
body = {"permissions": permissions}
path = "{}:testIamPermissions".format(table.path)
span_attributes = {"path": path}
response = self._call_api(
retry,
span_name="BigQuery.testIamPermissions",
span_attributes=span_attributes,
method="POST",
path=path,
data=body,
timeout=timeout,
)
return response
def get_model(self, model_ref, retry=DEFAULT_RETRY, timeout=None):
"""[Beta] Fetch the model referenced by ``model_ref``.
Args:
model_ref (Union[ \
google.cloud.bigquery.model.ModelReference, \
str, \
]):
A reference to the model to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
model reference from a string using
:func:`google.cloud.bigquery.model.ModelReference.from_string`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.model.Model: A ``Model`` instance.
"""
if isinstance(model_ref, str):
model_ref = ModelReference.from_string(
model_ref, default_project=self.project
)
path = model_ref.path
span_attributes = {"path": path}
api_response = self._call_api(
retry,
span_name="BigQuery.getModel",
span_attributes=span_attributes,
method="GET",
path=path,
timeout=timeout,
)
return Model.from_api_repr(api_response)
def get_routine(self, routine_ref, retry=DEFAULT_RETRY, timeout=None):
"""[Beta] Get the routine referenced by ``routine_ref``.
Args:
routine_ref (Union[ \
google.cloud.bigquery.routine.Routine, \
google.cloud.bigquery.routine.RoutineReference, \
str, \
]):
A reference to the routine to fetch from the BigQuery API. If
a string is passed in, this method attempts to create a
reference from a string using
:func:`google.cloud.bigquery.routine.RoutineReference.from_string`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the API call.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.routine.Routine:
A ``Routine`` instance.
"""
if isinstance(routine_ref, str):
routine_ref = RoutineReference.from_string(
routine_ref, default_project=self.project
)
path = routine_ref.path
span_attributes = {"path": path}
api_response = self._call_api(
retry,
span_name="BigQuery.getRoutine",
span_attributes=span_attributes,
method="GET",
path=path,
timeout=timeout,
)
return Routine.from_api_repr(api_response)
def get_table(self, table, retry=DEFAULT_RETRY, timeout=None):
"""Fetch the table referenced by ``table``.
Args:
table (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
A reference to the table to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.table.Table:
A ``Table`` instance.
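        Example:
            Illustrative sketch (the table ID below is a placeholder):
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> table = client.get_table("my_project.my_dataset.my_table")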
"""
table_ref = _table_arg_to_table_ref(table, default_project=self.project)
path = table_ref.path
span_attributes = {"path": path}
api_response = self._call_api(
retry,
span_name="BigQuery.getTable",
span_attributes=span_attributes,
method="GET",
path=path,
timeout=timeout,
)
return Table.from_api_repr(api_response)
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY, timeout=None):
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change. These are strings
corresponding to the properties of
:class:`~google.cloud.bigquery.dataset.Dataset`.
For example, to update the default expiration times, specify
both properties in the ``fields`` argument:
.. code-block:: python
bigquery_client.update_dataset(
dataset,
[
"default_partition_expiration_ms",
"default_table_expiration_ms",
]
)
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
"""
partial = dataset._build_resource(fields)
if dataset.etag is not None:
headers = {"If-Match": dataset.etag}
else:
headers = None
path = dataset.path
span_attributes = {"path": path, "fields": fields}
api_response = self._call_api(
retry,
span_name="BigQuery.updateDataset",
span_attributes=span_attributes,
method="PATCH",
path=path,
data=partial,
headers=headers,
timeout=timeout,
)
return Dataset.from_api_repr(api_response)
def update_model(self, model, fields, retry=DEFAULT_RETRY, timeout=None):
"""[Beta] Change some fields of a model.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``model``, the field value will be deleted.
If ``model.etag`` is not ``None``, the update will only succeed if
the model on the server has the same ETag. Thus reading a model with
``get_model``, changing its fields, and then passing it to
``update_model`` will ensure that the changes will only be saved if
no modifications to the model occurred since the read.
Args:
model (google.cloud.bigquery.model.Model): The model to update.
fields (Sequence[str]):
The properties of ``model`` to change. These are strings
corresponding to the properties of
:class:`~google.cloud.bigquery.model.Model`.
For example, to update the descriptive properties of the model,
specify them in the ``fields`` argument:
.. code-block:: python
bigquery_client.update_model(
model, ["description", "friendly_name"]
)
retry (Optional[google.api_core.retry.Retry]):
A description of how to retry the API call.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.model.Model:
The model resource returned from the API call.
"""
partial = model._build_resource(fields)
if model.etag:
headers = {"If-Match": model.etag}
else:
headers = None
path = model.path
span_attributes = {"path": path, "fields": fields}
api_response = self._call_api(
retry,
span_name="BigQuery.updateModel",
span_attributes=span_attributes,
method="PATCH",
path=path,
data=partial,
headers=headers,
timeout=timeout,
)
return Model.from_api_repr(api_response)
def update_routine(self, routine, fields, retry=DEFAULT_RETRY, timeout=None):
"""[Beta] Change some fields of a routine.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``routine``, the field value will be deleted.
.. warning::
During beta, partial updates are not supported. You must provide
all fields in the resource.
If :attr:`~google.cloud.bigquery.routine.Routine.etag` is not
``None``, the update will only succeed if the resource on the server
has the same ETag. Thus reading a routine with
:func:`~google.cloud.bigquery.client.Client.get_routine`, changing
its fields, and then passing it to this method will ensure that the
changes will only be saved if no modifications to the resource
occurred since the read.
Args:
routine (google.cloud.bigquery.routine.Routine):
The routine to update.
fields (Sequence[str]):
The fields of ``routine`` to change, spelled as the
:class:`~google.cloud.bigquery.routine.Routine` properties.
For example, to update the description property of the routine,
specify it in the ``fields`` argument:
.. code-block:: python
bigquery_client.update_routine(
routine, ["description"]
)
retry (Optional[google.api_core.retry.Retry]):
A description of how to retry the API call.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.routine.Routine:
The routine resource returned from the API call.
"""
partial = routine._build_resource(fields)
if routine.etag:
headers = {"If-Match": routine.etag}
else:
headers = None
# TODO: remove when routines update supports partial requests.
partial["routineReference"] = routine.reference.to_api_repr()
path = routine.path
span_attributes = {"path": path, "fields": fields}
api_response = self._call_api(
retry,
span_name="BigQuery.updateRoutine",
span_attributes=span_attributes,
method="PUT",
path=path,
data=partial,
headers=headers,
timeout=timeout,
)
return Routine.from_api_repr(api_response)
def update_table(self, table, fields, retry=DEFAULT_RETRY, timeout=None):
"""Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, the field value will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
Args:
table (google.cloud.bigquery.table.Table): The table to update.
fields (Sequence[str]):
The fields of ``table`` to change, spelled as the
:class:`~google.cloud.bigquery.table.Table` properties.
For example, to update the descriptive properties of the table,
specify them in the ``fields`` argument:
.. code-block:: python
bigquery_client.update_table(
table,
["description", "friendly_name"]
)
retry (Optional[google.api_core.retry.Retry]):
A description of how to retry the API call.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.table.Table:
The table resource returned from the API call.
"""
partial = table._build_resource(fields)
if table.etag is not None:
headers = {"If-Match": table.etag}
else:
headers = None
path = table.path
span_attributes = {"path": path, "fields": fields}
api_response = self._call_api(
retry,
span_name="BigQuery.updateTable",
span_attributes=span_attributes,
method="PATCH",
path=path,
data=partial,
headers=headers,
timeout=timeout,
)
return Table.from_api_repr(api_response)
def list_models(
self,
dataset,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""[Beta] List models in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/models/list
Args:
dataset (Union[ \
google.cloud.bigquery.dataset.Dataset, \
google.cloud.bigquery.dataset.DatasetReference, \
google.cloud.bigquery.dataset.DatasetListItem, \
str, \
]):
A reference to the dataset whose models to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (Optional[int]):
Maximum number of models to return. If not passed, defaults to a
value set by the API.
page_token (Optional[str]):
Token representing a cursor into the models. If not passed,
the API will return the first page of models. The token marks
the beginning of the iterator to be returned and the value of
the ``page_token`` can be accessed at ``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.model.Model` contained
within the requested dataset.
"""
dataset = self._dataset_from_arg(dataset)
path = "%s/models" % dataset.path
span_attributes = {"path": path}
def api_request(*args, **kwargs):
return self._call_api(
retry,
span_name="BigQuery.listModels",
span_attributes=span_attributes,
*args,
timeout=timeout,
**kwargs,
)
result = page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path=path,
item_to_value=_item_to_model,
items_key="models",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def list_routines(
self,
dataset,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""[Beta] List routines in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/list
Args:
dataset (Union[ \
google.cloud.bigquery.dataset.Dataset, \
google.cloud.bigquery.dataset.DatasetReference, \
google.cloud.bigquery.dataset.DatasetListItem, \
str, \
]):
A reference to the dataset whose routines to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (Optional[int]):
Maximum number of routines to return. If not passed, defaults
to a value set by the API.
page_token (Optional[str]):
Token representing a cursor into the routines. If not passed,
the API will return the first page of routines. The token marks
the beginning of the iterator to be returned and the value of the
``page_token`` can be accessed at ``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of all
:class:`~google.cloud.bigquery.routine.Routine`s contained
within the requested dataset, limited by ``max_results``.
"""
dataset = self._dataset_from_arg(dataset)
path = "{}/routines".format(dataset.path)
span_attributes = {"path": path}
def api_request(*args, **kwargs):
return self._call_api(
retry,
span_name="BigQuery.listRoutines",
span_attributes=span_attributes,
*args,
timeout=timeout,
**kwargs,
)
result = page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path=path,
item_to_value=_item_to_routine,
items_key="routines",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def list_tables(
self,
dataset,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""List tables in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
Args:
dataset (Union[ \
google.cloud.bigquery.dataset.Dataset, \
google.cloud.bigquery.dataset.DatasetReference, \
google.cloud.bigquery.dataset.DatasetListItem, \
str, \
]):
A reference to the dataset whose tables to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (Optional[int]):
Maximum number of tables to return. If not passed, defaults
to a value set by the API.
page_token (Optional[str]):
Token representing a cursor into the tables. If not passed,
the API will return the first page of tables. The token marks
the beginning of the iterator to be returned and the value of
the ``page_token`` can be accessed at ``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.table.TableListItem` contained
within the requested dataset.
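        Example:
            Illustrative sketch (the dataset ID below is a placeholder):
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> for table in client.list_tables("my_project.my_dataset"):
            ...     print(table.table_id)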
"""
dataset = self._dataset_from_arg(dataset)
path = "%s/tables" % dataset.path
span_attributes = {"path": path}
def api_request(*args, **kwargs):
return self._call_api(
retry,
span_name="BigQuery.listTables",
span_attributes=span_attributes,
*args,
timeout=timeout,
**kwargs,
)
result = page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path=path,
item_to_value=_item_to_table,
items_key="tables",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def delete_dataset(
self,
dataset,
delete_contents=False,
retry=DEFAULT_RETRY,
timeout=None,
not_found_ok=False,
):
"""Delete a dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
Args:
dataset (Union[ \
google.cloud.bigquery.dataset.Dataset, \
google.cloud.bigquery.dataset.DatasetReference, \
google.cloud.bigquery.dataset.DatasetListItem, \
str, \
]):
A reference to the dataset to delete. If a string is passed
in, this method attempts to create a dataset reference from a
string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
delete_contents (Optional[bool]):
If True, delete all the tables in the dataset. If False and
the dataset contains tables, the request will fail.
Default is False.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
not_found_ok (Optional[bool]):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the dataset.
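        Example:
            Illustrative sketch (the dataset ID below is a placeholder):
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> client.delete_dataset(
            ...     "my_project.my_dataset", delete_contents=True, not_found_ok=True
            ... )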
"""
dataset = self._dataset_from_arg(dataset)
params = {}
path = dataset.path
if delete_contents:
params["deleteContents"] = "true"
span_attributes = {"path": path, "deleteContents": delete_contents}
else:
span_attributes = {"path": path}
try:
self._call_api(
retry,
span_name="BigQuery.deleteDataset",
span_attributes=span_attributes,
method="DELETE",
path=path,
query_params=params,
timeout=timeout,
)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
def delete_model(
self, model, retry=DEFAULT_RETRY, timeout=None, not_found_ok=False
):
"""[Beta] Delete a model
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/models/delete
Args:
model (Union[ \
google.cloud.bigquery.model.Model, \
google.cloud.bigquery.model.ModelReference, \
str, \
]):
A reference to the model to delete. If a string is passed in,
this method attempts to create a model reference from a
string using
:func:`google.cloud.bigquery.model.ModelReference.from_string`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
not_found_ok (Optional[bool]):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the model.
"""
if isinstance(model, str):
model = ModelReference.from_string(model, default_project=self.project)
if not isinstance(model, (Model, ModelReference)):
raise TypeError("model must be a Model or a ModelReference")
path = model.path
try:
span_attributes = {"path": path}
self._call_api(
retry,
span_name="BigQuery.deleteModel",
span_attributes=span_attributes,
method="DELETE",
path=path,
timeout=timeout,
)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
def delete_routine(
self, routine, retry=DEFAULT_RETRY, timeout=None, not_found_ok=False
):
"""[Beta] Delete a routine.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/delete
Args:
routine (Union[ \
google.cloud.bigquery.routine.Routine, \
google.cloud.bigquery.routine.RoutineReference, \
str, \
]):
A reference to the routine to delete. If a string is passed
in, this method attempts to create a routine reference from a
string using
:func:`google.cloud.bigquery.routine.RoutineReference.from_string`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
not_found_ok (Optional[bool]):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the routine.
"""
if isinstance(routine, str):
routine = RoutineReference.from_string(
routine, default_project=self.project
)
path = routine.path
if not isinstance(routine, (Routine, RoutineReference)):
raise TypeError("routine must be a Routine or a RoutineReference")
try:
span_attributes = {"path": path}
self._call_api(
retry,
span_name="BigQuery.deleteRoutine",
span_attributes=span_attributes,
method="DELETE",
path=path,
timeout=timeout,
)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
def delete_table(
self, table, retry=DEFAULT_RETRY, timeout=None, not_found_ok=False
):
"""Delete a table
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/delete
Args:
table (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
A reference to the table to delete. If a string is passed in,
this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
not_found_ok (Optional[bool]):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the table.
"""
table = _table_arg_to_table_ref(table, default_project=self.project)
if not isinstance(table, TableReference):
raise TypeError("Unable to get TableReference for table '{}'".format(table))
try:
path = table.path
span_attributes = {"path": path}
self._call_api(
retry,
span_name="BigQuery.deleteTable",
span_attributes=span_attributes,
method="DELETE",
path=path,
timeout=timeout,
)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
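    # Usage sketch (illustrative only): deleting a hypothetical table given as
    # a "dataset.table" string, ignoring the error if it does not exist.
    #
    #   client.delete_table("my_dataset.my_table", not_found_ok=True)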
def _get_query_results(
self, job_id, retry, project=None, timeout_ms=None, location=None, timeout=None,
):
"""Get the query results object for a query job.
Args:
job_id (str): Name of the query job.
retry (google.api_core.retry.Retry):
How to retry the RPC.
project (Optional[str]):
Project ID for the query job (defaults to the project of the client).
timeout_ms (Optional[int]):
                Number of milliseconds the API call should wait for the query
to complete before the request times out.
location (Optional[str]): Location of the query job.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``. If set, this connection timeout may be
increased to a minimum value. This prevents retries on what
would otherwise be a successful response.
Returns:
google.cloud.bigquery.query._QueryResults:
A new ``_QueryResults`` instance.
"""
extra_params = {"maxResults": 0}
if timeout is not None:
timeout = max(timeout, _MIN_GET_QUERY_RESULTS_TIMEOUT)
if project is None:
project = self.project
if timeout_ms is not None:
extra_params["timeoutMs"] = timeout_ms
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/queries/{}".format(project, job_id)
# This call is typically made in a polling loop that checks whether the
# job is complete (from QueryJob.done(), called ultimately from
# QueryJob.result()). So we don't need to poll here.
span_attributes = {"path": path}
resource = self._call_api(
retry,
span_name="BigQuery.getQueryResults",
span_attributes=span_attributes,
method="GET",
path=path,
query_params=extra_params,
timeout=timeout,
)
return _QueryResults.from_api_repr(resource)
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
Args:
resource (Dict): one job resource from API response
Returns:
Union[ \
google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob \
]:
The job instance, constructed via the resource.
"""
config = resource.get("configuration", {})
if "load" in config:
return job.LoadJob.from_api_repr(resource, self)
elif "copy" in config:
return job.CopyJob.from_api_repr(resource, self)
elif "extract" in config:
return job.ExtractJob.from_api_repr(resource, self)
elif "query" in config:
return job.QueryJob.from_api_repr(resource, self)
return job.UnknownJob.from_api_repr(resource, self)
def create_job(self, job_config, retry=DEFAULT_RETRY, timeout=None):
"""Create a new job.
Args:
job_config (dict): configuration job representation returned from the API.
Keyword Arguments:
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
Union[ \
google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob \
]:
A new job instance.
"""
if "load" in job_config:
load_job_config = google.cloud.bigquery.job.LoadJobConfig.from_api_repr(
job_config
)
destination = _get_sub_prop(job_config, ["load", "destinationTable"])
source_uris = _get_sub_prop(job_config, ["load", "sourceUris"])
destination = TableReference.from_api_repr(destination)
return self.load_table_from_uri(
source_uris,
destination,
job_config=load_job_config,
retry=retry,
timeout=timeout,
)
elif "copy" in job_config:
copy_job_config = google.cloud.bigquery.job.CopyJobConfig.from_api_repr(
job_config
)
destination = _get_sub_prop(job_config, ["copy", "destinationTable"])
destination = TableReference.from_api_repr(destination)
sources = []
source_configs = _get_sub_prop(job_config, ["copy", "sourceTables"])
if source_configs is None:
source_configs = [_get_sub_prop(job_config, ["copy", "sourceTable"])]
for source_config in source_configs:
table_ref = TableReference.from_api_repr(source_config)
sources.append(table_ref)
return self.copy_table(
sources,
destination,
job_config=copy_job_config,
retry=retry,
timeout=timeout,
)
elif "extract" in job_config:
extract_job_config = google.cloud.bigquery.job.ExtractJobConfig.from_api_repr(
job_config
)
source = _get_sub_prop(job_config, ["extract", "sourceTable"])
if source:
source_type = "Table"
source = TableReference.from_api_repr(source)
else:
source = _get_sub_prop(job_config, ["extract", "sourceModel"])
source_type = "Model"
source = ModelReference.from_api_repr(source)
destination_uris = _get_sub_prop(job_config, ["extract", "destinationUris"])
return self.extract_table(
source,
destination_uris,
job_config=extract_job_config,
retry=retry,
timeout=timeout,
source_type=source_type,
)
elif "query" in job_config:
copy_config = copy.deepcopy(job_config)
_del_sub_prop(copy_config, ["query", "destinationTable"])
query_job_config = google.cloud.bigquery.job.QueryJobConfig.from_api_repr(
copy_config
)
query = _get_sub_prop(copy_config, ["query", "query"])
return self.query(
query, job_config=query_job_config, retry=retry, timeout=timeout
)
else:
raise TypeError("Invalid job configuration received.")
def get_job(
self, job_id, project=None, location=None, retry=DEFAULT_RETRY, timeout=None
):
"""Fetch a job for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
Args:
job_id (str): Unique job identifier.
Keyword Arguments:
project (Optional[str]):
ID of the project which owns the job (defaults to the client's project).
location (Optional[str]): Location where the job was run.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
Union[ \
google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob \
]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}".format(project, job_id)
span_attributes = {"path": path, "job_id": job_id, "location": location}
resource = self._call_api(
retry,
span_name="BigQuery.getJob",
span_attributes=span_attributes,
method="GET",
path=path,
query_params=extra_params,
timeout=timeout,
)
return self.job_from_resource(resource)
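    # Usage sketch (illustrative only): fetching a job by its ID; "my-job-id"
    # and the "US" location are hypothetical values.
    #
    #   job = client.get_job("my-job-id", location="US")
    #   print(job.job_type, job.state)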
def cancel_job(
self, job_id, project=None, location=None, retry=DEFAULT_RETRY, timeout=None
):
"""Attempt to cancel a job from a job ID.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
Args:
job_id (str): Unique job identifier.
Keyword Arguments:
project (Optional[str]):
ID of the project which owns the job (defaults to the client's project).
location (Optional[str]): Location where the job was run.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
Union[ \
google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob, \
]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}/cancel".format(project, job_id)
span_attributes = {"path": path, "job_id": job_id, "location": location}
resource = self._call_api(
retry,
span_name="BigQuery.cancelJob",
span_attributes=span_attributes,
method="POST",
path=path,
query_params=extra_params,
timeout=timeout,
)
return self.job_from_resource(resource["job"])
def list_jobs(
self,
project=None,
parent_job=None,
max_results=None,
page_token=None,
all_users=None,
state_filter=None,
retry=DEFAULT_RETRY,
timeout=None,
min_creation_time=None,
max_creation_time=None,
):
"""List jobs for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/list
Args:
project (Optional[str]):
                Project ID to use for retrieving jobs. Defaults
to the client's project.
parent_job (Optional[Union[ \
google.cloud.bigquery.job._AsyncJob, \
str, \
]]):
If set, retrieve only child jobs of the specified parent.
max_results (Optional[int]):
Maximum number of jobs to return.
page_token (Optional[str]):
Opaque marker for the next "page" of jobs. If not
passed, the API will return the first page of jobs. The token
marks the beginning of the iterator to be returned and the
value of the ``page_token`` can be accessed at
``next_page_token`` of
:class:`~google.api_core.page_iterator.HTTPIterator`.
all_users (Optional[bool]):
If true, include jobs owned by all users in the project.
Defaults to :data:`False`.
state_filter (Optional[str]):
If set, include only jobs matching the given state. One of:
* ``"done"``
* ``"pending"``
* ``"running"``
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
min_creation_time (Optional[datetime.datetime]):
Min value for job creation time. If set, only jobs created
after or at this timestamp are returned. If the datetime has
                no time zone, UTC is assumed.
max_creation_time (Optional[datetime.datetime]):
Max value for job creation time. If set, only jobs created
before or at this timestamp are returned. If the datetime has
                no time zone, UTC is assumed.
Returns:
google.api_core.page_iterator.Iterator:
Iterable of job instances.
"""
if isinstance(parent_job, job._AsyncJob):
parent_job = parent_job.job_id
extra_params = {
"allUsers": all_users,
"stateFilter": state_filter,
"minCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(min_creation_time)
),
"maxCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(max_creation_time)
),
"projection": "full",
"parentJobId": parent_job,
}
extra_params = {
param: value for param, value in extra_params.items() if value is not None
}
if project is None:
project = self.project
path = "/projects/%s/jobs" % (project,)
span_attributes = {"path": path}
def api_request(*args, **kwargs):
return self._call_api(
retry,
span_name="BigQuery.listJobs",
span_attributes=span_attributes,
*args,
timeout=timeout,
**kwargs,
)
return page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path=path,
item_to_value=_item_to_job,
items_key="jobs",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
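    # Usage sketch (illustrative only): listing completed jobs created after a
    # hypothetical cutoff timestamp.
    #
    #   import datetime
    #
    #   cutoff = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
    #   for job in client.list_jobs(state_filter="done", min_creation_time=cutoff):
    #       print(job.job_id, job.created)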
def load_table_from_uri(
self,
source_uris,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""Starts a job for loading data into a table from CloudStorage.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload
Args:
source_uris (Union[str, Sequence[str]]):
URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
destination (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
job_id (Optional[str]): Name of the job.
job_id_prefix (Optional[str]):
The user-provided prefix for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (Optional[str]):
Location where to run the job. Must match the location of the
destination table.
project (Optional[str]):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (Optional[google.cloud.bigquery.job.LoadJobConfig]):
Extra configuration options for the job.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
TypeError:
If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.LoadJobConfig`
class.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(source_uris, str):
source_uris = [source_uris]
destination = _table_arg_to_table_ref(destination, default_project=self.project)
if job_config:
job_config = copy.deepcopy(job_config)
_verify_job_config_type(job_config, google.cloud.bigquery.job.LoadJobConfig)
load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config)
load_job._begin(retry=retry, timeout=timeout)
return load_job
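    # Usage sketch (illustrative only): loading hypothetical CSV files from
    # Cloud Storage into a hypothetical table, skipping a header row.
    #
    #   from google.cloud import bigquery
    #
    #   job_config = bigquery.LoadJobConfig(
    #       source_format=bigquery.SourceFormat.CSV, skip_leading_rows=1
    #   )
    #   load_job = client.load_table_from_uri(
    #       "gs://my-bucket/data-*.csv", "my_dataset.my_table", job_config=job_config
    #   )
    #   load_job.result()  # Wait for the load to complete.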
def load_table_from_file(
self,
file_obj,
destination,
rewind=False,
size=None,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
timeout=None,
):
"""Upload the contents of this table from a file-like object.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Args:
file_obj (file): A file handle opened in binary mode for reading.
destination (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
rewind (Optional[bool]):
If True, seek to the beginning of the file handle before
reading the file.
size (Optional[int]):
The number of bytes to read from the file handle. If size is
``None`` or large, resumable upload will be used. Otherwise,
multipart upload will be used.
num_retries (Optional[int]): Number of upload retries. Defaults to 6.
job_id (Optional[str]): Name of the job.
job_id_prefix (Optional[str]):
The user-provided prefix for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (Optional[str]):
Location where to run the job. Must match the location of the
destination table.
project (Optional[str]):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (Optional[google.cloud.bigquery.job.LoadJobConfig]):
Extra configuration options for the job.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If ``size`` is not passed in and can not be determined, or if
the ``file_obj`` can be detected to be a file opened in text
mode.
TypeError:
If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.LoadJobConfig`
class.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
destination = _table_arg_to_table_ref(destination, default_project=self.project)
job_ref = job._JobReference(job_id, project=project, location=location)
if job_config:
job_config = copy.deepcopy(job_config)
_verify_job_config_type(job_config, google.cloud.bigquery.job.LoadJobConfig)
load_job = job.LoadJob(job_ref, None, destination, self, job_config)
job_resource = load_job.to_api_repr()
if rewind:
file_obj.seek(0, os.SEEK_SET)
_check_mode(file_obj)
try:
if size is None or size >= _MAX_MULTIPART_SIZE:
response = self._do_resumable_upload(
file_obj, job_resource, num_retries, timeout, project=project
)
else:
response = self._do_multipart_upload(
file_obj, job_resource, size, num_retries, timeout, project=project
)
except resumable_media.InvalidResponse as exc:
raise exceptions.from_http_response(exc.response)
return self.job_from_resource(response.json())
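    # Usage sketch (illustrative only): uploading a hypothetical local
    # newline-delimited JSON file with schema auto-detection.
    #
    #   job_config = bigquery.LoadJobConfig(
    #       source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
    #       autodetect=True,
    #   )
    #   with open("rows.json", "rb") as source_file:
    #       load_job = client.load_table_from_file(
    #           source_file, "my_dataset.my_table", job_config=job_config
    #       )
    #   load_job.result()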
def load_table_from_dataframe(
self,
dataframe,
destination,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
parquet_compression="snappy",
timeout=None,
):
"""Upload the contents of a table from a pandas DataFrame.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
.. note::
REPEATED fields are NOT supported when using the CSV source format.
They are supported when using the PARQUET source format, but
due to the way they are encoded in the ``parquet`` file,
a mismatch with the existing table schema can occur, so
100% compatibility cannot be guaranteed for REPEATED fields when
using the parquet format.
https://github.com/googleapis/python-bigquery/issues/17
Args:
dataframe (pandas.DataFrame):
A :class:`~pandas.DataFrame` containing the data to load.
destination (google.cloud.bigquery.table.TableReference):
The destination table to use for loading the data. If it is an
existing table, the schema of the :class:`~pandas.DataFrame`
must match the schema of the destination table. If the table
does not yet exist, the schema is inferred from the
:class:`~pandas.DataFrame`.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
num_retries (Optional[int]): Number of upload retries.
job_id (Optional[str]): Name of the job.
job_id_prefix (Optional[str]):
The user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (Optional[str]):
Location where to run the job. Must match the location of the
destination table.
project (Optional[str]):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (Optional[google.cloud.bigquery.job.LoadJobConfig]):
Extra configuration options for the job.
To override the default pandas data type conversions, supply
a value for
:attr:`~google.cloud.bigquery.job.LoadJobConfig.schema` with
column names matching those of the dataframe. The BigQuery
schema is used to determine the correct data type conversion.
Indexes are not loaded. Requires the :mod:`pyarrow` library.
By default, this method uses the parquet source format. To
override this, supply a value for
:attr:`~google.cloud.bigquery.job.LoadJobConfig.source_format`
with the format name. Currently only
:attr:`~google.cloud.bigquery.job.SourceFormat.CSV` and
:attr:`~google.cloud.bigquery.job.SourceFormat.PARQUET` are
supported.
parquet_compression (Optional[str]):
                [Beta] The compression method to use when serializing
                ``dataframe`` to an intermediate parquet file.
The argument is directly passed as the ``compression``
argument to the underlying ``pyarrow.parquet.write_table()``
method (the default value "snappy" gets converted to uppercase).
https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html#pyarrow-parquet-write-table
If the job config schema is missing, the argument is directly
passed as the ``compression`` argument to the underlying
``DataFrame.to_parquet()`` method.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html#pandas.DataFrame.to_parquet
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If a usable parquet engine cannot be found. This method
requires :mod:`pyarrow` to be installed.
TypeError:
If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.LoadJobConfig`
class.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if job_config:
_verify_job_config_type(job_config, google.cloud.bigquery.job.LoadJobConfig)
# Make a copy so that the job config isn't modified in-place.
job_config_properties = copy.deepcopy(job_config._properties)
job_config = job.LoadJobConfig()
job_config._properties = job_config_properties
else:
job_config = job.LoadJobConfig()
supported_formats = {job.SourceFormat.CSV, job.SourceFormat.PARQUET}
if job_config.source_format is None:
# default value
job_config.source_format = job.SourceFormat.PARQUET
if job_config.source_format not in supported_formats:
raise ValueError(
"Got unexpected source_format: '{}'. Currently, only PARQUET and CSV are supported".format(
job_config.source_format
)
)
if pyarrow is None and job_config.source_format == job.SourceFormat.PARQUET:
# pyarrow is now the only supported parquet engine.
raise ValueError("This method requires pyarrow to be installed")
if location is None:
location = self.location
# If table schema is not provided, we try to fetch the existing table
# schema, and check if dataframe schema is compatible with it - except
# for WRITE_TRUNCATE jobs, the existing schema does not matter then.
if (
not job_config.schema
and job_config.write_disposition != job.WriteDisposition.WRITE_TRUNCATE
):
try:
table = self.get_table(destination)
except google.api_core.exceptions.NotFound:
table = None
else:
columns_and_indexes = frozenset(
name
for name, _ in _pandas_helpers.list_columns_and_indexes(dataframe)
)
job_config.schema = [
# Field description and policy tags are not needed to
# serialize a data frame.
SchemaField(
field.name,
field.field_type,
mode=field.mode,
fields=field.fields,
)
# schema fields not present in the dataframe are not needed
for field in table.schema
if field.name in columns_and_indexes
]
job_config.schema = _pandas_helpers.dataframe_to_bq_schema(
dataframe, job_config.schema
)
if not job_config.schema:
# the schema could not be fully detected
warnings.warn(
"Schema could not be detected for all columns. Loading from a "
"dataframe without a schema will be deprecated in the future, "
"please provide a schema.",
PendingDeprecationWarning,
stacklevel=2,
)
tmpfd, tmppath = tempfile.mkstemp(
suffix="_job_{}.{}".format(job_id[:8], job_config.source_format.lower())
)
os.close(tmpfd)
try:
if job_config.source_format == job.SourceFormat.PARQUET:
if job_config.schema:
if parquet_compression == "snappy": # adjust the default value
parquet_compression = parquet_compression.upper()
_pandas_helpers.dataframe_to_parquet(
dataframe,
job_config.schema,
tmppath,
parquet_compression=parquet_compression,
)
else:
dataframe.to_parquet(tmppath, compression=parquet_compression)
else:
dataframe.to_csv(
tmppath,
index=False,
header=False,
encoding="utf-8",
float_format="%.17g",
date_format="%Y-%m-%d %H:%M:%S.%f",
)
with open(tmppath, "rb") as tmpfile:
file_size = os.path.getsize(tmppath)
return self.load_table_from_file(
tmpfile,
destination,
num_retries=num_retries,
rewind=True,
size=file_size,
job_id=job_id,
job_id_prefix=job_id_prefix,
location=location,
project=project,
job_config=job_config,
timeout=timeout,
)
finally:
os.remove(tmppath)
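    # Usage sketch (illustrative only, requires pandas and pyarrow): loading a
    # small hypothetical DataFrame into a hypothetical table.
    #
    #   import pandas
    #
    #   dataframe = pandas.DataFrame({"name": ["alpha", "beta"], "value": [1, 2]})
    #   load_job = client.load_table_from_dataframe(dataframe, "my_dataset.my_table")
    #   load_job.result()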
def load_table_from_json(
self,
json_rows,
destination,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
timeout=None,
):
"""Upload the contents of a table from a JSON string or dict.
Args:
json_rows (Iterable[Dict[str, Any]]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
.. note::
If your data is already a newline-delimited JSON string,
it is best to wrap it into a file-like object and pass it
to :meth:`~google.cloud.bigquery.client.Client.load_table_from_file`::
import io
from google.cloud import bigquery
data = u'{"foo": "bar"}'
data_as_file = io.StringIO(data)
client = bigquery.Client()
client.load_table_from_file(data_as_file, ...)
destination (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
num_retries (Optional[int]): Number of upload retries.
job_id (Optional[str]): Name of the job.
job_id_prefix (Optional[str]):
The user-provided prefix for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (Optional[str]):
Location where to run the job. Must match the location of the
destination table.
project (Optional[str]):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (Optional[google.cloud.bigquery.job.LoadJobConfig]):
Extra configuration options for the job. The ``source_format``
setting is always set to
:attr:`~google.cloud.bigquery.job.SourceFormat.NEWLINE_DELIMITED_JSON`.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
TypeError:
If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.LoadJobConfig`
class.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if job_config:
_verify_job_config_type(job_config, google.cloud.bigquery.job.LoadJobConfig)
# Make a copy so that the job config isn't modified in-place.
job_config = copy.deepcopy(job_config)
else:
job_config = job.LoadJobConfig()
job_config.source_format = job.SourceFormat.NEWLINE_DELIMITED_JSON
if job_config.schema is None:
job_config.autodetect = True
if project is None:
project = self.project
if location is None:
location = self.location
destination = _table_arg_to_table_ref(destination, default_project=self.project)
data_str = "\n".join(json.dumps(item) for item in json_rows)
encoded_str = data_str.encode()
data_file = io.BytesIO(encoded_str)
return self.load_table_from_file(
data_file,
destination,
size=len(encoded_str),
num_retries=num_retries,
job_id=job_id,
job_id_prefix=job_id_prefix,
location=location,
project=project,
job_config=job_config,
timeout=timeout,
)
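    # Usage sketch (illustrative only): loading hypothetical rows given as
    # dicts; the schema is auto-detected because none is supplied.
    #
    #   rows = [{"name": "alpha", "value": 1}, {"name": "beta", "value": 2}]
    #   load_job = client.load_table_from_json(rows, "my_dataset.my_table")
    #   load_job.result()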
def _do_resumable_upload(
self, stream, metadata, num_retries, timeout, project=None
):
"""Perform a resumable upload.
Args:
stream (IO[bytes]): A bytes IO object open for reading.
metadata (Dict): The metadata associated with the upload.
num_retries (int):
Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
timeout (float):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
project (Optional[str]):
                Project ID of the project where the upload runs. Defaults
to the client's project.
Returns:
requests.Response:
The "200 OK" response object returned after the final chunk
is uploaded.
"""
upload, transport = self._initiate_resumable_upload(
stream, metadata, num_retries, timeout, project=project
)
while not upload.finished:
response = upload.transmit_next_chunk(transport)
return response
def _initiate_resumable_upload(
self, stream, metadata, num_retries, timeout, project=None
):
"""Initiate a resumable upload.
Args:
stream (IO[bytes]): A bytes IO object open for reading.
metadata (Dict): The metadata associated with the upload.
num_retries (int):
Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
timeout (float):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
project (Optional[str]):
                Project ID of the project where the upload runs. Defaults
to the client's project.
Returns:
Tuple:
Pair of
* The :class:`~google.resumable_media.requests.ResumableUpload`
that was created
* The ``transport`` used to initiate the upload.
"""
chunk_size = _DEFAULT_CHUNKSIZE
transport = self._http
headers = _get_upload_headers(self._connection.user_agent)
if project is None:
project = self.project
# TODO: Increase the minimum version of google-cloud-core to 1.6.0
# and remove this logic. See:
# https://github.com/googleapis/python-bigquery/issues/509
hostname = (
self._connection.API_BASE_URL
if not hasattr(self._connection, "get_api_base_url_for_mtls")
else self._connection.get_api_base_url_for_mtls()
)
upload_url = _RESUMABLE_URL_TEMPLATE.format(host=hostname, project=project)
# TODO: modify ResumableUpload to take a retry.Retry object
# that it can use for the initial RPC.
upload = ResumableUpload(upload_url, chunk_size, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
upload.initiate(
transport,
stream,
metadata,
_GENERIC_CONTENT_TYPE,
stream_final=False,
timeout=timeout,
)
return upload, transport
def _do_multipart_upload(
self, stream, metadata, size, num_retries, timeout, project=None
):
"""Perform a multipart upload.
Args:
stream (IO[bytes]): A bytes IO object open for reading.
metadata (Dict): The metadata associated with the upload.
size (int):
                The number of bytes to be uploaded, which will be read from
                ``stream``. The stream must have at least this many bytes
                remaining.
num_retries (int):
Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
timeout (float):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
project (Optional[str]):
                Project ID of the project where the upload runs. Defaults
to the client's project.
Returns:
requests.Response:
The "200 OK" response object returned after the multipart
upload request.
Raises:
ValueError:
if the ``stream`` has fewer than ``size``
bytes remaining.
"""
data = stream.read(size)
if len(data) < size:
msg = _READ_LESS_THAN_SIZE.format(size, len(data))
raise ValueError(msg)
headers = _get_upload_headers(self._connection.user_agent)
if project is None:
project = self.project
# TODO: Increase the minimum version of google-cloud-core to 1.6.0
# and remove this logic. See:
# https://github.com/googleapis/python-bigquery/issues/509
hostname = (
self._connection.API_BASE_URL
if not hasattr(self._connection, "get_api_base_url_for_mtls")
else self._connection.get_api_base_url_for_mtls()
)
upload_url = _MULTIPART_URL_TEMPLATE.format(host=hostname, project=project)
upload = MultipartUpload(upload_url, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
response = upload.transmit(
self._http, data, metadata, _GENERIC_CONTENT_TYPE, timeout=timeout
)
return response
def copy_table(
self,
sources,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""Copy one or more tables to another table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationtablecopy
Args:
sources (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
Sequence[ \
Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
] \
], \
]):
Table or tables to be copied.
destination (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
Table into which data is to be copied.
Keyword Arguments:
job_id (Optional[str]): The ID of the job.
job_id_prefix (Optional[str]):
The user-provided prefix for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (Optional[str]):
Location where to run the job. Must match the location of any
source table as well as the destination table.
project (Optional[str]):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (Optional[google.cloud.bigquery.job.CopyJobConfig]):
Extra configuration options for the job.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.job.CopyJob: A new copy job instance.
Raises:
TypeError:
If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.CopyJobConfig`
class.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
# sources can be one of many different input types. (string, Table,
# TableReference, or a sequence of any of those.) Convert them all to a
# list of TableReferences.
#
# _table_arg_to_table_ref leaves lists unmodified.
sources = _table_arg_to_table_ref(sources, default_project=self.project)
if not isinstance(sources, collections_abc.Sequence):
sources = [sources]
sources = [
_table_arg_to_table_ref(source, default_project=self.project)
for source in sources
]
destination = _table_arg_to_table_ref(destination, default_project=self.project)
if job_config:
_verify_job_config_type(job_config, google.cloud.bigquery.job.CopyJobConfig)
job_config = copy.deepcopy(job_config)
copy_job = job.CopyJob(
job_ref, sources, destination, client=self, job_config=job_config
)
copy_job._begin(retry=retry, timeout=timeout)
return copy_job
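    # Usage sketch (illustrative only): copying one hypothetical table onto
    # another.
    #
    #   copy_job = client.copy_table(
    #       "my_dataset.source_table", "my_dataset.destination_table"
    #   )
    #   copy_job.result()  # Wait for the copy to complete.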
def extract_table(
self,
source,
destination_uris,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
timeout=None,
source_type="Table",
):
"""Start a job to extract a table into Cloud Storage files.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationextract
Args:
source (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
google.cloud.bigquery.model.Model, \
google.cloud.bigquery.model.ModelReference, \
                str, \
]):
Table or Model to be extracted.
destination_uris (Union[str, Sequence[str]]):
URIs of Cloud Storage file(s) into which table data is to be
extracted; in format
``gs://<bucket_name>/<object_name_or_glob>``.
Keyword Arguments:
job_id (Optional[str]): The ID of the job.
job_id_prefix (Optional[str]):
The user-provided prefix for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (Optional[str]):
Location where to run the job. Must match the location of the
source table.
project (Optional[str]):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (Optional[google.cloud.bigquery.job.ExtractJobConfig]):
Extra configuration options for the job.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
source_type (Optional[str]):
                Type of source to be extracted. One of ``Table`` or
                ``Model``. Defaults to ``Table``.
Returns:
google.cloud.bigquery.job.ExtractJob: A new extract job instance.
Raises:
TypeError:
If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.ExtractJobConfig`
class.
ValueError:
                If ``source_type`` is not one of ``Table`` or ``Model``.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
src = source_type.lower()
if src == "table":
source = _table_arg_to_table_ref(source, default_project=self.project)
elif src == "model":
source = _model_arg_to_model_ref(source, default_project=self.project)
else:
raise ValueError(
"Cannot pass `{}` as a ``source_type``, pass Table or Model".format(
source_type
)
)
if isinstance(destination_uris, str):
destination_uris = [destination_uris]
if job_config:
_verify_job_config_type(
job_config, google.cloud.bigquery.job.ExtractJobConfig
)
job_config = copy.deepcopy(job_config)
extract_job = job.ExtractJob(
job_ref, source, destination_uris, client=self, job_config=job_config
)
extract_job._begin(retry=retry, timeout=timeout)
return extract_job
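    # Usage sketch (illustrative only): exporting a hypothetical table to
    # Cloud Storage as gzip-compressed CSV.
    #
    #   job_config = bigquery.ExtractJobConfig(
    #       destination_format=bigquery.DestinationFormat.CSV,
    #       compression=bigquery.Compression.GZIP,
    #   )
    #   extract_job = client.extract_table(
    #       "my_dataset.my_table",
    #       "gs://my-bucket/export-*.csv.gz",
    #       job_config=job_config,
    #   )
    #   extract_job.result()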
def query(
self,
query,
job_config=None,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""Run a SQL query.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationquery
Args:
query (str):
SQL query to be executed. Defaults to the standard SQL
dialect. Use the ``job_config`` parameter to change dialects.
Keyword Arguments:
job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
Extra configuration options for the job.
To override any options that were previously set in
the ``default_query_job_config`` given to the
``Client`` constructor, manually set those options to ``None``,
or whatever value is preferred.
job_id (Optional[str]): ID to use for the query job.
job_id_prefix (Optional[str]):
The prefix to use for a randomly generated job ID. This parameter
will be ignored if a ``job_id`` is also given.
location (Optional[str]):
                Location where to run the job. Must match the location of
any table used in the query as well as the destination table.
project (Optional[str]):
                Project ID of the project where the job runs. Defaults
to the client's project.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
google.cloud.bigquery.job.QueryJob: A new query job instance.
Raises:
TypeError:
If ``job_config`` is not an instance of :class:`~google.cloud.bigquery.job.QueryJobConfig`
class.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_config = copy.deepcopy(job_config)
if self._default_query_job_config:
if job_config:
_verify_job_config_type(
job_config, google.cloud.bigquery.job.QueryJobConfig
)
# anything that's not defined on the incoming
# that is in the default,
# should be filled in with the default
# the incoming therefore has precedence
job_config = job_config._fill_from_default(
self._default_query_job_config
)
else:
_verify_job_config_type(
self._default_query_job_config,
google.cloud.bigquery.job.QueryJobConfig,
)
job_config = copy.deepcopy(self._default_query_job_config)
job_ref = job._JobReference(job_id, project=project, location=location)
query_job = job.QueryJob(job_ref, query, client=self, job_config=job_config)
query_job._begin(retry=retry, timeout=timeout)
return query_job
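    # Usage sketch (illustrative only): running a parameterized query against
    # a hypothetical table.
    #
    #   job_config = bigquery.QueryJobConfig(
    #       query_parameters=[bigquery.ScalarQueryParameter("min_value", "INT64", 10)]
    #   )
    #   query_job = client.query(
    #       "SELECT name FROM `my_dataset.my_table` WHERE value >= @min_value",
    #       job_config=job_config,
    #   )
    #   for row in query_job.result():
    #       print(row["name"])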
def insert_rows(self, table, rows, selected_fields=None, **kwargs):
"""Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[Sequence[Tuple], Sequence[Dict]]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (Dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
            ValueError: if the table's schema is not set.
            TypeError: if ``rows`` is not a ``Sequence``.
"""
if not isinstance(rows, (collections_abc.Sequence, collections_abc.Iterator)):
raise TypeError("rows argument should be a sequence of dicts or tuples")
table = _table_arg_to_table(table, default_project=self.project)
if not isinstance(table, Table):
raise TypeError(_NEED_TABLE_ARGUMENT)
schema = table.schema
# selected_fields can override the table schema.
if selected_fields is not None:
schema = selected_fields
if len(schema) == 0:
raise ValueError(
(
"Could not determine schema for table '{}'. Call client.get_table() "
"or pass in a list of schema fields to the selected_fields argument."
).format(table)
)
json_rows = [_record_field_to_json(schema, row) for row in rows]
return self.insert_rows_json(table, json_rows, **kwargs)
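    # Usage sketch (illustrative only): streaming hypothetical rows given as
    # dicts. The table is fetched first so that its schema is available for
    # the type conversion performed above.
    #
    #   table = client.get_table("my_dataset.my_table")
    #   errors = client.insert_rows(
    #       table, [{"name": "alpha", "value": 1}, {"name": "beta", "value": 2}]
    #   )
    #   if errors:
    #       print("Insert problems:", errors)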
def insert_rows_from_dataframe(
self, table, dataframe, selected_fields=None, chunk_size=500, **kwargs
):
"""Insert rows into a table from a dataframe via the streaming API.
Args:
table (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
The destination table for the row data, or a reference to it.
dataframe (pandas.DataFrame):
A :class:`~pandas.DataFrame` containing the data to load. Any
``NaN`` values present in the dataframe are omitted from the
streaming API request(s).
selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
chunk_size (int):
The number of rows to stream in a single chunk. Must be positive.
kwargs (Dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Sequence[Mappings]]:
A list with insert errors for each insert chunk. Each element
is a list containing one mapping per row with insert errors:
the "index" key identifies the row, and the "errors" key
contains a list of the mappings describing one or more problems
with the row.
Raises:
ValueError: if table's schema is not set
"""
insert_results = []
chunk_count = int(math.ceil(len(dataframe) / chunk_size))
rows_iter = _pandas_helpers.dataframe_to_json_generator(dataframe)
for _ in range(chunk_count):
rows_chunk = itertools.islice(rows_iter, chunk_size)
result = self.insert_rows(table, rows_chunk, selected_fields, **kwargs)
insert_results.append(result)
return insert_results
def insert_rows_json(
self,
table,
json_rows,
row_ids=None,
skip_invalid_rows=None,
ignore_unknown_values=None,
template_suffix=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""Insert rows into a table without applying local type conversions.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
google.cloud.bigquery.table.Table \
google.cloud.bigquery.table.TableReference, \
str \
]):
The destination table for the row data, or a reference to it.
json_rows (Sequence[Dict]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
row_ids (Optional[Sequence[Optional[str]]]):
Unique IDs, one per row being inserted. An ID can also be
``None``, indicating that an explicit insert ID should **not**
be used for that row. If the argument is omitted altogether,
unique IDs are created automatically.
skip_invalid_rows (Optional[bool]):
Insert all valid rows of a request, even if invalid rows exist.
The default value is ``False``, which causes the entire request
to fail if any invalid rows exist.
ignore_unknown_values (Optional[bool]):
Accept rows that contain values that do not match the schema.
The unknown values are ignored. Default is ``False``, which
treats unknown values as errors.
template_suffix (Optional[str]):
Treat ``name`` as a template table and provide a suffix.
BigQuery will create the table ``<name> + <template_suffix>``
based on the schema of the template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
TypeError: if `json_rows` is not a `Sequence`.
"""
if not isinstance(
json_rows, (collections_abc.Sequence, collections_abc.Iterator)
):
raise TypeError("json_rows argument should be a sequence of dicts")
# Convert table to just a reference because unlike insert_rows,
# insert_rows_json doesn't need the table schema. It's not doing any
# type conversions.
table = _table_arg_to_table_ref(table, default_project=self.project)
rows_info = []
data = {"rows": rows_info}
for index, row in enumerate(json_rows):
info = {"json": row}
if row_ids is not None:
info["insertId"] = row_ids[index]
else:
info["insertId"] = str(uuid.uuid4())
rows_info.append(info)
if skip_invalid_rows is not None:
data["skipInvalidRows"] = skip_invalid_rows
if ignore_unknown_values is not None:
data["ignoreUnknownValues"] = ignore_unknown_values
if template_suffix is not None:
data["templateSuffix"] = template_suffix
path = "%s/insertAll" % table.path
# We can always retry, because every row has an insert ID.
span_attributes = {"path": path}
response = self._call_api(
retry,
span_name="BigQuery.insertRowsJson",
span_attributes=span_attributes,
method="POST",
path=path,
data=data,
timeout=timeout,
)
errors = []
for error in response.get("insertErrors", ()):
errors.append({"index": int(error["index"]), "errors": error["errors"]})
return errors
def list_partitions(self, table, retry=DEFAULT_RETRY, timeout=None):
"""List the partitions in a table.
Args:
table (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
The table or reference from which to get partition info
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
If multiple requests are made under the hood, ``timeout``
applies to each individual request.
Returns:
List[str]:
A list of the partition ids present in the partitioned table
"""
table = _table_arg_to_table_ref(table, default_project=self.project)
meta_table = self.get_table(
TableReference(
DatasetReference(table.project, table.dataset_id),
"%s$__PARTITIONS_SUMMARY__" % table.table_id,
),
retry=retry,
timeout=timeout,
)
subset = [col for col in meta_table.schema if col.name == "partition_id"]
return [
row[0]
for row in self.list_rows(
meta_table, selected_fields=subset, retry=retry, timeout=timeout
)
]
def list_rows(
self,
table,
selected_fields=None,
max_results=None,
page_token=None,
start_index=None,
page_size=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""List the rows of the table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
.. note::
This method assumes that the provided schema is up-to-date with the
schema as defined on the back-end: if the two schemas are not
identical, the values returned may be incomplete. To ensure that the
local copy of the schema is up-to-date, call ``client.get_table``.
Args:
table (Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableListItem, \
google.cloud.bigquery.table.TableReference, \
str, \
]):
The table to list, or a reference to it. When the table
object does not contain a schema and ``selected_fields`` is
not supplied, this method calls ``get_table`` to fetch the
table schema.
selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
The fields to return. If not supplied, data for all columns
are downloaded.
max_results (Optional[int]):
Maximum number of rows to return.
page_token (Optional[str]):
Token representing a cursor into the table's rows.
If not passed, the API will return the first page of the
rows. The token marks the beginning of the iterator to be
returned and the value of the ``page_token`` can be accessed
at ``next_page_token`` of the
:class:`~google.cloud.bigquery.table.RowIterator`.
start_index (Optional[int]):
The zero-based index of the starting row to read.
page_size (Optional[int]):
The maximum number of rows in each page of results from this request.
Non-positive values are ignored. Defaults to a sensible value set by the API.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
If multiple requests are made under the hood, ``timeout``
applies to each individual request.
Returns:
google.cloud.bigquery.table.RowIterator:
Iterator of row data
:class:`~google.cloud.bigquery.table.Row`-s. During each
page, the iterator will have the ``total_rows`` attribute
set, which counts the total number of rows **in the table**
(this is distinct from the total number of rows in the
current page: ``iterator.page.num_items``).
"""
table = _table_arg_to_table(table, default_project=self.project)
if not isinstance(table, Table):
raise TypeError(_NEED_TABLE_ARGUMENT)
schema = table.schema
# selected_fields can override the table schema.
if selected_fields is not None:
schema = selected_fields
# No schema, but no selected_fields. Assume the developer wants all
# columns, so get the table resource for them rather than failing.
elif len(schema) == 0:
table = self.get_table(table.reference, retry=retry, timeout=timeout)
schema = table.schema
params = {}
if selected_fields is not None:
params["selectedFields"] = ",".join(field.name for field in selected_fields)
if start_index is not None:
params["startIndex"] = start_index
params["formatOptions.useInt64Timestamp"] = True
row_iterator = RowIterator(
client=self,
api_request=functools.partial(self._call_api, retry, timeout=timeout),
path="%s/data" % (table.path,),
schema=schema,
page_token=page_token,
max_results=max_results,
page_size=page_size,
extra_params=params,
table=table,
# Pass in selected_fields separately from schema so that full
# tables can be fetched without a column filter.
selected_fields=selected_fields,
total_rows=getattr(table, "num_rows", None),
)
return row_iterator
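    # Usage sketch (illustrative only): reading a subset of columns from a
    # hypothetical table.
    #
    #   table = client.get_table("my_dataset.my_table")
    #   fields = [f for f in table.schema if f.name in ("name", "value")]
    #   for row in client.list_rows(table, selected_fields=fields, max_results=10):
    #       print(row["name"], row["value"])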
def _list_rows_from_query_results(
self,
job_id,
location,
project,
schema,
total_rows=None,
destination=None,
max_results=None,
start_index=None,
page_size=None,
retry=DEFAULT_RETRY,
timeout=None,
):
"""List the rows of a completed query.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults
Args:
job_id (str):
ID of a query job.
location (str): Location of the query job.
project (str):
ID of the project where the query job was run.
schema (Sequence[google.cloud.bigquery.schema.SchemaField]):
The fields expected in these query results. Used to convert
from JSON to expected Python types.
total_rows (Optional[int]):
Total number of rows in the query results.
destination (Optional[Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableListItem, \
google.cloud.bigquery.table.TableReference, \
str, \
]]):
Destination table reference. Used to fetch the query results
with the BigQuery Storage API.
max_results (Optional[int]):
Maximum number of rows to return across the whole iterator.
start_index (Optional[int]):
The zero-based index of the starting row to read.
page_size (Optional[int]):
The maximum number of rows in each page of results from this request.
Non-positive values are ignored. Defaults to a sensible value set by the API.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``. If set, this connection timeout may be
increased to a minimum value. This prevents retries on what
would otherwise be a successful response.
If multiple requests are made under the hood, ``timeout``
applies to each individual request.
Returns:
google.cloud.bigquery.table.RowIterator:
Iterator of row data
:class:`~google.cloud.bigquery.table.Row`-s.
"""
params = {
"fields": _LIST_ROWS_FROM_QUERY_RESULTS_FIELDS,
"location": location,
}
if timeout is not None:
timeout = max(timeout, _MIN_GET_QUERY_RESULTS_TIMEOUT)
if start_index is not None:
params["startIndex"] = start_index
params["formatOptions.useInt64Timestamp"] = True
row_iterator = RowIterator(
client=self,
api_request=functools.partial(self._call_api, retry, timeout=timeout),
path=f"/projects/{project}/queries/{job_id}",
schema=schema,
max_results=max_results,
page_size=page_size,
table=destination,
extra_params=params,
total_rows=total_rows,
)
return row_iterator
def _schema_from_json_file_object(self, file_obj):
"""Helper function for schema_from_json that takes a
file object that describes a table schema.
Returns:
List of schema field objects.
"""
json_data = json.load(file_obj)
return [SchemaField.from_api_repr(field) for field in json_data]
def _schema_to_json_file_object(self, schema_list, file_obj):
"""Helper function for schema_to_json that takes a schema list and file
object and writes the schema list to the file object with json.dump
"""
json.dump(schema_list, file_obj, indent=2, sort_keys=True)
def schema_from_json(self, file_or_path):
"""Takes a file object or file path that contains json that describes
a table schema.
Returns:
List of schema field objects.
"""
if isinstance(file_or_path, io.IOBase):
return self._schema_from_json_file_object(file_or_path)
with open(file_or_path) as file_obj:
return self._schema_from_json_file_object(file_obj)
def schema_to_json(self, schema_list, destination):
"""Takes a list of schema field objects.
Serializes the list of schema field objects as json to a file.
Destination is a file path or a file object.
"""
json_schema_list = [f.to_api_repr() for f in schema_list]
if isinstance(destination, io.IOBase):
return self._schema_to_json_file_object(json_schema_list, destination)
with open(destination, mode="w") as file_obj:
return self._schema_to_json_file_object(json_schema_list, file_obj)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
# pylint: disable=unused-argument
def _item_to_project(iterator, resource):
"""Convert a JSON project to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
resource (Dict): An item to be converted to a project.
Returns:
google.cloud.bigquery.client.Project: The next project in the page.
"""
return Project.from_api_repr(resource)
# pylint: enable=unused-argument
def _item_to_dataset(iterator, resource):
"""Convert a JSON dataset to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
resource (Dict): An item to be converted to a dataset.
Returns:
google.cloud.bigquery.dataset.DatasetListItem: The next dataset in the page.
"""
return DatasetListItem(resource)
def _item_to_job(iterator, resource):
"""Convert a JSON job to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
resource (Dict): An item to be converted to a job.
Returns:
job instance: The next job in the page.
"""
return iterator.client.job_from_resource(resource)
def _item_to_model(iterator, resource):
"""Convert a JSON model to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator):
The iterator that is currently in use.
resource (Dict): An item to be converted to a model.
Returns:
google.cloud.bigquery.model.Model: The next model in the page.
"""
return Model.from_api_repr(resource)
def _item_to_routine(iterator, resource):
"""Convert a JSON model to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator):
The iterator that is currently in use.
resource (Dict): An item to be converted to a routine.
Returns:
google.cloud.bigquery.routine.Routine: The next routine in the page.
"""
return Routine.from_api_repr(resource)
def _item_to_table(iterator, resource):
"""Convert a JSON table to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
resource (Dict): An item to be converted to a table.
Returns:
google.cloud.bigquery.table.Table: The next table in the page.
"""
return TableListItem(resource)
def _make_job_id(job_id, prefix=None):
"""Construct an ID for a new job.
Args:
job_id (Optional[str]): the user-provided job ID.
prefix (Optional[str]): the user-provided prefix for a job ID.
Returns:
str: A job ID
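    Example:
        Illustrative behaviour (the literal values are arbitrary):
            _make_job_id("my-job")              # -> "my-job"
            _make_job_id(None, prefix="load-")  # -> "load-" followed by a UUID
            _make_job_id(None)                  # -> a random UUID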
"""
if job_id is not None:
return job_id
elif prefix is not None:
return str(prefix) + str(uuid.uuid4())
else:
return str(uuid.uuid4())
def _check_mode(stream):
"""Check that a stream was opened in read-binary mode.
Args:
stream (IO[bytes]): A bytes IO object open for reading.
Raises:
ValueError:
if the ``stream.mode`` is a valid attribute
and is not among ``rb``, ``r+b`` or ``rb+``.
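    Example:
        Illustrative sketch (the file name is an assumption):
            with open("data.csv", mode="rb") as stream:
                _check_mode(stream)  # passes
            with open("data.csv", mode="r") as stream:
                _check_mode(stream)  # raises ValueError (text mode)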
"""
mode = getattr(stream, "mode", None)
if isinstance(stream, gzip.GzipFile):
if mode != gzip.READ:
raise ValueError(
"Cannot upload gzip files opened in write mode: use "
"gzip.GzipFile(filename, mode='rb')"
)
else:
if mode is not None and mode not in ("rb", "r+b", "rb+"):
raise ValueError(
"Cannot upload files opened in text mode: use "
"open(filename, mode='rb') or open(filename, mode='r+b')"
)
def _get_upload_headers(user_agent):
"""Get the headers for an upload request.
Args:
user_agent (str): The user-agent for requests.
Returns:
Dict: The headers to be used for the request.
"""
return {
"Accept": "application/json",
"Accept-Encoding": "gzip, deflate",
"User-Agent": user_agent,
"content-type": "application/json",
}
| 38.08794 | 136 | 0.583005 |
5f46d38f3da6709b29d5c1e40be2f89402106e75 | 1,921 | py | Python | mindspore/ops/_op_impl/tbe/tensor_scatter_update.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | ["Apache-2.0"] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | mindspore/ops/_op_impl/tbe/tensor_scatter_update.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | ["Apache-2.0"] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | mindspore/ops/_op_impl/tbe/tensor_scatter_update.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | ["Apache-2.0"] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorScatterUpdate op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
tensor_scatter_update_op_info = TBERegOp("TensorScatterUpdate") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("scatter_update.so") \
.compute_cost(10) \
.kernel_name("scatter_update") \
.partial_flag(True) \
.input(0, "x", False, "required", "all") \
.input(1, "indices", False, "required", "all") \
.input(1, "updates", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.I8_Default, DataType.I32_Default, DataType.I8_Default, DataType.I8_Default) \
.dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.U8_Default, DataType.U8_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.get_op_info()
@op_info_register(tensor_scatter_update_op_info)
def _tensor_scatter_update_tbe():
"""TensorScatterUpdate TBE register"""
return
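# Illustrative use of the registered operator through the Python API. This is a
# sketch for orientation only (the tensor shapes and values are assumptions),
# not part of the TBE registration itself:
#
#     import numpy as np
#     from mindspore import Tensor, ops
#
#     op = ops.TensorScatterUpdate()
#     x = Tensor(np.zeros((3, 3), np.float32))
#     indices = Tensor(np.array([[0, 0], [1, 1]], np.int32))
#     updates = Tensor(np.array([1.0, 2.0], np.float32))
#     out = op(x, indices, updates)  # writes the updates at the given indices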
| 45.738095 | 107 | 0.716294 |
0e7f59c52e088fd8d5ad71af9da7c06a97bfaecb | 88,190 | py | Python | budgetportal/models.py | TomaszKolek/datamanager | d46dbab00e30a14fc26eb9368c32dcdbbda7309d | ["MIT"] | null | null | null | budgetportal/models.py | TomaszKolek/datamanager | d46dbab00e30a14fc26eb9368c32dcdbbda7309d | ["MIT"] | null | null | null | budgetportal/models.py | TomaszKolek/datamanager | d46dbab00e30a14fc26eb9368c32dcdbbda7309d | ["MIT"] | null | null | null |
import logging
import re
import uuid
from datetime import datetime
from decimal import Decimal
from pprint import pformat
from urllib.parse import quote
import requests
from slugify import slugify
from adminsortable.models import SortableMixin
from autoslug import AutoSlugField
from budgetportal.datasets import Dataset, get_expenditure_time_series_dataset
from django.conf import settings
from django.core.exceptions import MultipleObjectsReturned
from django.core.cache import cache
from django.urls import reverse
from partial_index import PartialIndex
from blocks import SectionBlock
from budgetportal import nav_bar
from collections import OrderedDict
from django.core.exceptions import ValidationError
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.core import blocks as wagtail_blocks
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Page
logger = logging.getLogger(__name__)
ckan = settings.CKAN
URL_LENGTH_LIMIT = 2000
CKAN_DATASTORE_URL = settings.CKAN_URL + "/api/3/action" "/datastore_search_sql"
MAPIT_POINT_API_URL = "https://mapit.code4sa.org/point/4326/{},{}"
DIRECT_CHARGE_NRF = "Direct charge against the National Revenue Fund"
prov_abbrev = {
"Eastern Cape": "EC",
"Free State": "FS",
"Gauteng": "GT",
"KwaZulu-Natal": "NL",
"Limpopo": "LP",
"Mpumalanga": "MP",
"Northern Cape": "NC",
"North West": "NW",
"Western Cape": "WC",
}
# Budget Phase IDs for the 7-year overview period
TRENDS_AND_ESTIMATES_PHASES = [
"Audited Outcome",
"Audited Outcome",
"Audited Outcome",
"Adjusted appropriation",
"Main appropriation",
"Medium Term Estimates",
"Medium Term Estimates",
]
EXPENDITURE_TIME_SERIES_PHASES = (
"Main appropriation",
"Adjusted appropriation",
"Final Appropriation",
"Audit Outcome",
)
EXPENDITURE_TIME_SERIES_PHASE_MAPPING = {
"original": "Main appropriation",
"adjusted": "Adjusted appropriation",
"actual": "Audit Outcome",
}
class Homepage(models.Model):
main_heading = models.CharField(max_length=1000, blank=True)
sub_heading = models.CharField(max_length=1000, blank=True)
primary_button_label = models.CharField(max_length=1000, blank=True)
primary_button_url = models.CharField(max_length=1000, blank=True)
primary_button_target = models.CharField(max_length=1000, blank=True)
secondary_button_label = models.CharField(max_length=1000, blank=True)
secondary_button_url = models.CharField(max_length=1000, blank=True)
secondary_button_target = models.CharField(max_length=1000, blank=True)
call_to_action_sub_heading = models.CharField(max_length=1000, blank=True)
call_to_action_heading = models.CharField(max_length=1000, blank=True)
call_to_action_link_label = models.CharField(max_length=1000, blank=True)
call_to_action_link_url = models.CharField(max_length=1000, blank=True)
call_to_action_link_target = models.CharField(max_length=1000, blank=True)
class FinancialYear(models.Model):
organisational_unit = "financial_year"
slug = models.SlugField(max_length=7, unique=True)
published = models.BooleanField(default=False)
_consolidated_expenditure_budget_dataset = None
class Meta:
ordering = ["-slug"]
@property
def national(self):
return self.spheres.filter(slug="national")[0]
@property
def provincial(self):
return self.spheres.filter(slug="provincial")[0]
def get_url_path(self):
return "/%s" % self.slug
def get_starting_year(self):
return self.slug[:4]
@staticmethod
def slug_from_year_start(year):
return year + "-" + str(int(year[2:]) + 1)
@staticmethod
def start_from_year_slug(slug):
return slug[:4]
def get_sphere(self, name):
return getattr(self, name)
def get_closest_match(self, department):
sphere = self.spheres.filter(slug=department.government.sphere.slug).first()
government = sphere.governments.filter(slug=department.government.slug).first()
department = government.departments.filter(slug=department.slug).first()
if not department:
return government, False
return department, True
@classmethod
def get_available_years(cls):
years = list(cls.objects.filter(published=True).order_by("-slug")[:4])
years.reverse()
return years
@classmethod
def get_latest_year(cls):
return cls.objects.filter(published=True).order_by("-slug")[0]
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.get_url_path())
class Sphere(models.Model):
organisational_unit = "sphere"
name = models.CharField(max_length=200)
slug = AutoSlugField(populate_from="name", max_length=200, always_update=True)
financial_year = models.ForeignKey(
FinancialYear, on_delete=models.CASCADE, related_name="spheres"
)
class Meta:
unique_together = (("financial_year", "slug"), ("financial_year", "name"))
ordering = ["-financial_year__slug", "name"]
def get_url_path(self):
return "%s/%s" % (self.financial_year.get_url_path(), self.slug)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.get_url_path())
class Government(models.Model):
organisational_unit = "government"
sphere = models.ForeignKey(
Sphere, on_delete=models.CASCADE, related_name="governments"
)
name = models.CharField(max_length=200)
slug = AutoSlugField(populate_from="name", max_length=200, always_update=True)
_function_budgets = None
class Meta:
unique_together = (("sphere", "slug"), ("sphere", "name"))
def get_url_path(self):
if self.sphere.slug == "national":
return self.sphere.get_url_path()
else:
return "%s/%s" % (self.sphere.get_url_path(), self.slug)
def get_department_by_slug(self, slug):
        departments = self.departments.filter(slug=slug)
if departments.count() == 0:
return None
elif departments.count() == 1:
return departments.first()
else:
raise Exception("More matching slugs than expected")
def get_vote_primary_departments(self):
return Department.objects.filter(government=self, is_vote_primary=True).all()
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.get_url_path())
class Department(models.Model):
organisational_unit = "department"
government = models.ForeignKey(
Government, on_delete=models.CASCADE, related_name="departments"
)
name = models.CharField(
max_length=200,
help_text="The department name must precisely match the text used in the Appropriation "
"Bill. All datasets must be normalised to match this name. Beware that changing "
"this name might cause a mismatch with already-published datasets which might "
"need to be update to match this.",
)
slug = AutoSlugField(
populate_from="name", max_length=200, always_update=True, editable=True
)
vote_number = models.IntegerField()
is_vote_primary = models.BooleanField(default=True)
intro = models.TextField()
website_url = models.URLField(default=None, null=True, blank=True)
def __init__(self, *args, **kwargs):
self._programme_budgets = None
self._econ_by_programme_budgets = None
self._prog_by_econ_budgets = None
self._adjusted_estimates_of_expenditure_dataset = None
self._estimates_of_econ_classes_expenditure_dataset = {}
self._estimates_of_subprogramme_expenditure_dataset = None
self._expenditure_time_series_dataset = None
super(Department, self).__init__(*args, **kwargs)
class Meta:
unique_together = (("government", "slug"), ("government", "name"))
indexes = [
PartialIndex(
fields=["government", "vote_number"],
unique=True,
where="is_vote_primary",
)
]
ordering = ["vote_number", "name"]
def clean(self):
# This is only for user feedback in admin.
# The constraint must be enforced elsewhere.
existing_vote_primary = Department.objects.filter(
government=self.government, vote_number=self.vote_number
)
if (
self.is_vote_primary
and existing_vote_primary
and existing_vote_primary.first() != self
):
raise ValidationError(
"There is already a primary department for "
"vote %d" % self.vote_number
)
@classmethod
def get_in_latest_government(cls, name, government_name):
"""
Get a department instance whose slug matches the provided name slugified,
in the government with the provided name in the latest financial year.
Returns None if a matching department is not found.
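        Example:
            A minimal sketch (the department and government names are assumptions):
                dept = Department.get_in_latest_government("Health", "Eastern Cape")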
"""
try:
return cls.objects.filter(
slug=slugify(name), government__name=government_name
).order_by("-government__sphere__financial_year__slug")[0]
except IndexError:
return None
def create_dataset(self, name, title, group_name):
vocab_map = get_vocab_map()
tags = [
{
"vocabulary_id": vocab_map["spheres"],
"name": self.government.sphere.slug,
},
{
"vocabulary_id": vocab_map["financial_years"],
"name": self.get_financial_year().slug,
},
]
if self.government.sphere.slug == "provincial":
tags.append(
{"vocabulary_id": vocab_map["provinces"], "name": self.government.name}
)
dataset_fields = {
"title": title,
"name": name,
"groups": [{"name": group_name}],
"extras": [
{"key": "department_name", "value": self.name},
{"key": "Department Name", "value": self.name},
{"key": "department_name_slug", "value": self.slug},
{"key": "Vote Number", "value": self.vote_number},
{"key": "vote_number", "value": self.vote_number},
{"key": "geographic_region_slug", "value": self.government.slug},
{"key": "organisational_unit", "value": "department"},
],
"owner_org": "national-treasury",
"license_id": "other-pd",
"tags": tags,
}
logger.info("Creating package with %r", dataset_fields)
return Dataset.from_package(ckan.action.package_create(**dataset_fields))
def get_latest_website_url(self):
""" Always return the latest available non-null URL, even for old departments. """
newer_departments = Department.objects.filter(
government__slug=self.government.slug,
government__sphere__slug=self.government.sphere.slug,
slug=self.slug,
website_url__isnull=False,
).order_by("-government__sphere__financial_year__slug")
return newer_departments.first().website_url if newer_departments else None
def get_url_path(self):
""" e.g. 2018-19/national/departments/military-veterans """
return "%s/departments/%s" % (self.government.get_url_path(), self.slug)
def get_preview_url_path(self):
""" e.g. 2018-19/previews/national/south-africa/agriculture-and-fisheries """
return "%s/previews/%s/%s/%s" % (
self.government.sphere.financial_year.slug,
self.government.sphere.slug,
self.government.slug,
self.slug,
)
def get_govt_functions(self):
return GovtFunction.objects.filter(programme__department=self).distinct()
def get_financial_year(self):
return self.government.sphere.financial_year
def get_latest_department_instance(self):
""" Try to find the department in the most recent year with the same slug.
Continue traversing backwards in time until found, or until the original year has been reached. """
newer_departments = Department.objects.filter(
government__slug=self.government.slug,
government__sphere__slug=self.government.sphere.slug,
slug=self.slug,
).order_by("-government__sphere__financial_year__slug")
return newer_departments.first() if newer_departments else None
def _get_financial_year_query(self):
return '+vocab_financial_years:"%s"' % self.get_financial_year().slug
def _get_government_query(self):
if self.government.sphere.slug == "provincial":
return '+vocab_provinces:"%s"' % self.government.name
else:
return none_selected_query("vocab_provinces")
def get_primary_department(self):
"""
Check if department is primary
"""
if not self.is_vote_primary:
try:
dept = Department.objects.get(
vote_number=self.vote_number,
is_vote_primary=True,
government=self.government,
)
except MultipleObjectsReturned:
logger.exception(
"Department %s has multiple primary " "departments" % self.slug
)
raise
else:
return dept
return self
def get_dataset(self, group_name, name=None):
"""
Get a dataset correctly annotated to match this department.
If name isn't provided, still assume there's just one dataset
in the specified group categorised to match this department.
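        Example:
            A minimal sketch, assuming ``department`` is a Department instance;
            the group name is an assumption:
                dataset = department.get_dataset(group_name="budget-vote-documents")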
"""
query = {
"q": "",
"fq": (
'+organization:"national-treasury"'
'+vocab_financial_years:"%s"'
'+vocab_spheres:"%s"'
'+extras_s_geographic_region_slug:"%s"'
'+extras_s_department_name_slug:"%s"'
'+groups:"%s"'
)
% (
self.government.sphere.financial_year.slug,
self.government.sphere.slug,
self.government.slug,
self.get_primary_department().slug,
group_name,
),
"rows": 1,
}
if name:
query["fq"] += '+name:"%s"' % name
response = ckan.action.package_search(**query)
logger.info(
"query %s\nreturned %d results", pformat(query), len(response["results"])
)
if response["results"]:
assert len(response["results"]) == 1
return Dataset.from_package(response["results"][0])
def _get_functions_query(self):
function_names = [f.name for f in self.get_govt_functions()]
        ckan_tag_names = [re.sub(r"[^\w -]", "", n) for n in function_names]
if len(ckan_tag_names) == 0:
# We select datasets with no functions rather than datasets
# with any function (e.g. a blank query) because this query
# is intended to restrict datasets to matching functions.
return none_selected_query("vocab_functions")
else:
options = ['+vocab_functions:"%s"' % n for n in ckan_tag_names]
return "+(%s)" % " OR ".join(options)
def get_contributed_datasets(self):
# We use an OrderedDict like an Ordered Set to ensure we include each
# match just once, and at the highest rank it came up in.
datasets = OrderedDict()
fq_org = '-organization:"national-treasury"'
fq_group = '+(*:* NOT groups:["" TO *])'
fq_year = self._get_financial_year_query()
fq_sphere = '+vocab_spheres:"%s"' % self.government.sphere.slug
fq_government = self._get_government_query()
fq_functions = self._get_functions_query()
fq_no_functions = none_selected_query("vocab_functions")
queries = [
(fq_org, fq_group, fq_year, fq_sphere, fq_government, fq_functions),
(fq_org, fq_group, fq_sphere, fq_government, fq_functions),
(fq_org, fq_group, fq_year, fq_sphere, fq_functions),
(fq_org, fq_group, fq_sphere, fq_functions),
(fq_org, fq_group, fq_functions),
(fq_org, fq_group, fq_no_functions),
]
for query in queries:
params = {"q": "", "fq": "".join(query), "rows": 1000}
response = ckan.action.package_search(**params)
logger.info(
"query %s\nto ckan returned %d results",
pformat(params),
len(response["results"]),
)
for package in response["results"]:
if package["name"] not in datasets:
dataset = Dataset.from_package(package)
datasets[package["name"]] = dataset
return datasets.values()
def get_estimates_of_econ_classes_expenditure_dataset(self, level=3):
if (
self._estimates_of_econ_classes_expenditure_dataset.get(level, None)
is not None
):
return self._estimates_of_econ_classes_expenditure_dataset[level]
query = {
"q": "",
"fq": "".join(
[
'+organization:"national-treasury"',
'+groups:"estimates-of-%s-expenditure"'
% self.government.sphere.slug,
'+vocab_financial_years:"%s"' % self.get_financial_year().slug,
'+vocab_dimensions:"Economic classification %d"' % level,
]
),
"rows": 1000,
"rows": 1000,
}
response = ckan.action.package_search(**query)
logger.info(
"query %s\nreturned %d results", pformat(query), len(response["results"])
)
if response["results"]:
package = response["results"][0]
self._estimates_of_econ_classes_expenditure_dataset[
level
] = Dataset.from_package(package)
return self._estimates_of_econ_classes_expenditure_dataset[level]
else:
return None
def get_adjusted_estimates_expenditure_dataset(self):
if self._adjusted_estimates_of_expenditure_dataset is not None:
return self._adjusted_estimates_of_expenditure_dataset
query = {
"q": "",
"fq": "".join(
[
'+organization:"national-treasury"',
'+groups:"adjusted-estimates-of-%s-expenditure"'
% self.government.sphere.slug,
'+vocab_financial_years:"%s"' % self.get_financial_year().slug,
]
),
"rows": 1000,
}
response = ckan.action.package_search(**query)
logger.info(
"query %s\nreturned %d results", pformat(query), len(response["results"])
)
if response["results"]:
package = response["results"][0]
self._adjusted_estimates_of_expenditure_dataset = Dataset.from_package(
package
)
return self._adjusted_estimates_of_expenditure_dataset
else:
return None
def get_estimates_of_subprogramme_expenditure_dataset(self):
"""
Gets the dataset that should have this department's budget data
for this year down to sub-programme level
"""
if self._estimates_of_subprogramme_expenditure_dataset is not None:
return self._estimates_of_subprogramme_expenditure_dataset
query = {
"q": "",
"fq": "".join(
[
'+organization:"national-treasury"',
'+groups:"estimates-of-%s-expenditure"'
% self.government.sphere.slug,
'+vocab_financial_years:"%s"' % self.get_financial_year().slug,
'+vocab_dimensions:"Sub-programme"',
]
),
"rows": 1000,
}
response = ckan.action.package_search(**query)
if response["results"]:
package = response["results"][0]
self._estimates_of_subprogramme_expenditure_dataset = Dataset.from_package(
package
)
return self._estimates_of_subprogramme_expenditure_dataset
else:
return None
def get_expenditure_over_time(self):
cpi_year_slug, cpi_resource_id = Dataset.get_latest_cpi_resource()
base_year = get_base_year(cpi_year_slug)
financial_year_start = self.get_financial_year().get_starting_year()
financial_year_start_int = int(financial_year_start)
financial_year_starts = [
str(y)
for y in range(financial_year_start_int - 4, financial_year_start_int + 3)
]
expenditure = {
"base_financial_year": FinancialYear.slug_from_year_start(str(base_year)),
"nominal": [],
"real": [],
}
dataset = self.get_estimates_of_econ_classes_expenditure_dataset()
if not dataset:
return None
openspending_api = dataset.get_openspending_api()
cuts = []
if self.government.sphere.slug == "provincial":
cuts.append(openspending_api.get_geo_ref() + ':"%s"' % self.government.name)
drilldowns = [
openspending_api.get_financial_year_ref(),
openspending_api.get_phase_ref(),
openspending_api.get_department_name_ref(),
]
budget_results = openspending_api.aggregate(cuts=cuts, drilldowns=drilldowns)
result = openspending_api.filter_dept(budget_results, self.name)
if result["cells"]:
cpi = get_cpi()
for idx, financial_year_start in enumerate(financial_year_starts):
phase = TRENDS_AND_ESTIMATES_PHASES[idx]
cell = [
c
for c in result["cells"]
if c[openspending_api.get_financial_year_ref()]
== int(financial_year_start)
and c[openspending_api.get_phase_ref()] == phase
][0]
nominal = cell["value.sum"]
expenditure["nominal"].append(
{
"financial_year": FinancialYear.slug_from_year_start(
financial_year_start
),
"amount": nominal,
"phase": phase,
}
)
expenditure["real"].append(
{
"financial_year": FinancialYear.slug_from_year_start(
financial_year_start
),
"amount": int(
(Decimal(nominal) / cpi[financial_year_start]["index"])
* 100
),
"phase": phase,
}
)
return {
"expenditure": expenditure,
"dataset_detail_page": dataset.get_url_path(),
}
else:
logger.warning(
"Missing expenditure data for %r budget year %s",
cuts,
self.get_financial_year().slug,
)
return None
def get_adjusted_budget_summary(self):
dataset = self.get_adjusted_estimates_expenditure_dataset()
if not dataset:
return None
openspending_api = dataset.get_openspending_api()
if not openspending_api:
return None
result_for_type_and_total = openspending_api.aggregate(
cuts=[
openspending_api.get_financial_year_ref()
+ ":"
+ self.get_financial_year().get_starting_year()
],
drilldowns=[
openspending_api.get_adjustment_kind_ref(),
openspending_api.get_phase_ref(),
openspending_api.get_programme_name_ref(),
openspending_api.get_department_name_ref(),
],
order=[openspending_api.get_adjustment_kind_ref()],
)
result_for_type_and_total = openspending_api.filter_dept(
result_for_type_and_total, self.name
)
filtered_cells = openspending_api.filter_by_ref_exclusion(
result_for_type_and_total["cells"],
openspending_api.get_programme_name_ref(),
DIRECT_CHARGE_NRF,
)
cells_for_type_and_total = openspending_api.aggregate_by_refs(
[
openspending_api.get_adjustment_kind_ref(),
openspending_api.get_phase_ref(),
],
filtered_cells,
)
if not cells_for_type_and_total:
return None
total_voted, total_adjusted = self._get_total_budget_adjustment(
openspending_api, cells_for_type_and_total
)
dept_aggregate_url = openspending_api.aggregate_url(
cuts=[
openspending_api.get_financial_year_ref()
+ ":"
+ self.get_financial_year().get_starting_year(),
openspending_api.get_department_name_ref() + ":" + self.name,
],
drilldowns=openspending_api.get_all_drilldowns(),
)
return {
"by_type": self._get_adjustments_by_type(
openspending_api, cells_for_type_and_total
),
"total_change": {
"amount": total_adjusted,
"percentage": (float(total_adjusted) / float(total_voted)) * 100,
},
"econ_classes": self._get_adjustments_by_econ_class(openspending_api),
"programmes": self._get_adjustments_by_programme(openspending_api),
"virements": self._get_budget_virements(
openspending_api, dataset, total_voted
),
"special_appropriation": self._get_budget_special_appropriations(
openspending_api, total_voted
),
"direct_charges": self._get_budget_direct_charges(openspending_api),
"department_data_csv": csv_url(dept_aggregate_url),
"dataset_detail_page": dataset.get_url_path(),
}
def _get_adjustments_by_type(self, openspending_api, cells):
budget_phase_ref = openspending_api.get_phase_ref()
adjustment_kind_ref = openspending_api.get_adjustment_kind_ref()
def filter_by_type(cell):
types = [
"Adjustments - Announced in the budget speech",
"Adjustments - Declared unspent funds",
"Adjustments - Emergency funding",
"Adjustments - Roll-overs",
"Adjustments - Self-financing",
"Adjustments - Shifting of functions between votes",
"Adjustments - Shifting of functions within the vote",
"Adjustments - Significant and unforeseeable economic and financial events",
"Adjustments - Unforeseeable/unavoidable",
"Adjustments - Virements and shifts due to savings",
]
whitelist = {"Adjusted appropriation": types}
whitelist_keys = whitelist.keys()
phase = cell[budget_phase_ref]
descript = cell[adjustment_kind_ref]
if phase in whitelist_keys:
if descript in whitelist[phase]:
return True
return False
cells_by_type = [c for c in cells if filter_by_type(c)]
by_type = []
for cell in cells_by_type:
name = cell[adjustment_kind_ref]
name = name.replace("Adjustments - ", "")
if cell["value.sum"]:
by_type.append(
{"name": name, "amount": cell["value.sum"], "type": "kind"}
)
return by_type if by_type else None
def _get_adjustments_by_programme(self, openspending_api):
result_for_programmes = openspending_api.aggregate(
cuts=[
openspending_api.get_financial_year_ref()
+ ":"
+ self.get_financial_year().get_starting_year(),
openspending_api.get_department_name_ref() + ':"' + self.name + '"',
openspending_api.get_adjustment_kind_ref()
+ ":"
+ '"Adjustments - Total adjustments"',
],
drilldowns=[
openspending_api.get_programme_name_ref(),
openspending_api.get_phase_ref(),
openspending_api.get_department_name_ref(),
],
order=[openspending_api.get_programme_name_ref()],
)
result_for_programmes = openspending_api.filter_dept(
result_for_programmes, self.name
)
programme_name_ref = openspending_api.get_programme_name_ref()
cells_for_programmes = openspending_api.filter_by_ref_exclusion(
result_for_programmes["cells"], programme_name_ref, DIRECT_CHARGE_NRF
)
programmes = [
{"name": cell[programme_name_ref], "amount": cell["value.sum"]}
for cell in cells_for_programmes
if cell["value.sum"]
]
return programmes if programmes else None
def _get_adjustments_by_econ_class(self, openspending_api):
result_for_econ_classes = openspending_api.aggregate(
cuts=[
openspending_api.get_financial_year_ref()
+ ":"
+ self.get_financial_year().get_starting_year(),
openspending_api.get_adjustment_kind_ref()
+ ":"
+ '"Adjustments - Total adjustments"',
],
drilldowns=[
openspending_api.get_econ_class_2_ref(),
openspending_api.get_econ_class_3_ref(),
openspending_api.get_programme_name_ref(),
openspending_api.get_department_name_ref(),
],
order=[
openspending_api.get_econ_class_2_ref(),
openspending_api.get_econ_class_3_ref(),
],
)
result_for_econ_classes = openspending_api.filter_dept(
result_for_econ_classes, self.name
)
econ_classes = dict()
econ_class_2_ref = openspending_api.get_econ_class_2_ref()
econ_class_3_ref = openspending_api.get_econ_class_3_ref()
filtered_cells = openspending_api.filter_by_ref_exclusion(
result_for_econ_classes["cells"],
openspending_api.get_programme_name_ref(),
DIRECT_CHARGE_NRF,
)
cells_for_econ_classes = openspending_api.aggregate_by_refs(
[
openspending_api.get_econ_class_2_ref(),
openspending_api.get_econ_class_3_ref(),
],
filtered_cells,
)
for cell in cells_for_econ_classes:
new_econ_2_object = {
"type": "economic_classification_3",
"name": cell[econ_class_3_ref],
"amount": cell["value.sum"],
}
if cell["value.sum"] != 0:
if cell[econ_class_2_ref] not in econ_classes.keys():
econ_classes[cell[econ_class_2_ref]] = {
"type": "economic_classification_2",
"name": cell[econ_class_2_ref],
"items": [],
}
econ_classes[cell[econ_class_2_ref]]["items"].append(new_econ_2_object)
# sort by name
name_func = lambda x: x["name"]
for econ_2_name in list(
econ_classes.keys()
): # Copy keys because we're updating dict
econ_classes[econ_2_name]["items"] = sorted(
econ_classes[econ_2_name]["items"], key=name_func
)
econ_classes_list = sorted(econ_classes.values(), key=name_func)
return econ_classes_list if econ_classes_list else None
@staticmethod
def _get_total_budget_adjustment(openspending_api, cells):
if not cells:
return None
phase_ref = openspending_api.get_phase_ref()
descript_ref = openspending_api.get_adjustment_kind_ref()
total_adjusted, total_voted = None, None
for cell in cells:
if (
cell[phase_ref] == "Adjusted appropriation"
and cell[descript_ref] == "Adjustments - Total adjustments"
):
total_adjusted = cell["value.sum"]
if (
cell[phase_ref] == "Voted (Main appropriation)"
and cell[descript_ref] == "Total"
):
total_voted = cell["value.sum"]
if total_voted and not total_adjusted:
total_adjusted = 0
elif not (total_voted or total_adjusted):
raise Exception("Could not calculate total change for department budget")
return total_voted, total_adjusted
def _get_budget_virements(self, openspending_api, dataset, total_voted):
virements_resource = dataset.get_resource("CSV", name="Value of Virements")
if virements_resource:
sql = """
SELECT "Value of Virements" FROM "{}"
WHERE "department_name"='{}'
""".format(
virements_resource["id"], self.name
)
params = {"sql": sql}
result = requests.get(CKAN_DATASTORE_URL, params=params)
result.raise_for_status()
records = result.json()["result"]["records"]
value = records[0]["Value of Virements"]
virements = {
"label": "Value of virements",
"amount": int(value),
"percentage": 100 * (float(value) / float(total_voted)),
}
else:
result_for_virements = openspending_api.aggregate(
cuts=[
openspending_api.get_financial_year_ref()
+ ":"
+ self.get_financial_year().get_starting_year(),
openspending_api.get_adjustment_kind_ref()
+ ":"
+ '"Adjustments - Virements and shifts due to savings"',
],
drilldowns=openspending_api.get_all_drilldowns(),
)
result_for_virements = openspending_api.filter_dept(
result_for_virements, self.name
)
cells_for_virements = openspending_api.filter_by_ref_exclusion(
result_for_virements["cells"],
openspending_api.get_programme_name_ref(),
DIRECT_CHARGE_NRF,
)
total_positive_virement_change = 0
for c in cells_for_virements:
if c["value.sum"] > 0:
total_positive_virement_change += c["value.sum"]
virements = {
"label": "Value of virements and shifts due to savings",
"amount": int(total_positive_virement_change),
"percentage": 100
* float(total_positive_virement_change)
/ float(total_voted),
}
return virements if virements else None
def _get_budget_special_appropriations(self, openspending_api, total_voted):
result_for_special_appropriations = openspending_api.aggregate(
cuts=[
openspending_api.get_financial_year_ref()
+ ":"
+ self.get_financial_year().get_starting_year(),
openspending_api.get_adjustment_kind_ref()
+ ":"
+ '"Special appropriation"',
],
drilldowns=[openspending_api.get_department_name_ref()],
)
result_for_special_appropriations = openspending_api.filter_dept(
result_for_special_appropriations, self.name
)
total_special_appropriations = 0
for cell in result_for_special_appropriations["cells"]:
if cell["value.sum"]:
total_special_appropriations += cell["value.sum"]
if total_special_appropriations:
return {
"amount": total_special_appropriations,
"percentage": (float(total_special_appropriations) / float(total_voted))
* 100,
}
else:
return None
def _get_budget_direct_charges(self, openspending_api):
result_for_direct_charges = openspending_api.aggregate(
cuts=[
openspending_api.get_financial_year_ref()
+ ":"
+ self.get_financial_year().get_starting_year(),
openspending_api.get_programme_name_ref() + ":" + DIRECT_CHARGE_NRF,
],
drilldowns=[
openspending_api.get_phase_ref(),
openspending_api.get_subprogramme_name_ref(),
openspending_api.get_department_name_ref(),
openspending_api.get_adjustment_kind_ref(),
],
order=[openspending_api.get_subprogramme_name_ref()],
)
result_for_direct_charges = openspending_api.filter_dept(
result_for_direct_charges, self.name
)
subprog_ref = openspending_api.get_subprogramme_name_ref()
phase_ref = openspending_api.get_phase_ref()
kind_ref = openspending_api.get_adjustment_kind_ref()
subprog_dict = OrderedDict()
for cell in result_for_direct_charges["cells"]:
if cell[kind_ref] == "Adjustments - Total adjustments":
subprog_dict[cell[subprog_ref]] = {
"amount": cell["value.sum"],
"label": cell[subprog_ref],
}
for subprog in subprog_dict.keys():
for cell in result_for_direct_charges["cells"]:
if cell[subprog_ref] == subprog:
if cell[phase_ref] == "Voted (Main appropriation)":
subprog_dict[subprog]["percentage"] = (
float(subprog_dict[subprog]["amount"])
/ float(cell["value.sum"])
) * 100
return subprog_dict.values() if subprog_dict else None
def get_all_budget_totals_by_year_and_phase(self):
""" Returns the total for each year:phase combination from the expenditure time series dataset. """
dataset = get_expenditure_time_series_dataset(self.government.sphere.slug)
if not dataset:
return None
openspending_api = dataset.get_openspending_api()
phase_ref = openspending_api.get_phase_ref()
year_ref = openspending_api.get_financial_year_ref()
total_budget_cuts = [
openspending_api.get_adjustment_kind_ref() + ":" + '"Total"'
]
total_budget_drilldowns = [
year_ref,
phase_ref,
openspending_api.get_programme_name_ref(),
]
total_budget_results = openspending_api.aggregate(
cuts=total_budget_cuts, drilldowns=total_budget_drilldowns
)
total_budget_filtered = openspending_api.filter_by_ref_exclusion(
total_budget_results["cells"],
openspending_api.get_programme_name_ref(),
DIRECT_CHARGE_NRF,
)
total_budget_aggregated = openspending_api.aggregate_by_refs(
[year_ref, phase_ref], total_budget_filtered
)
total_budgets = {}
for cell in total_budget_aggregated:
if cell[phase_ref] not in total_budgets.keys():
total_budgets[cell[phase_ref]] = {
cell[year_ref]: float(cell["value.sum"])
}
else:
total_budgets[cell[phase_ref]][cell[year_ref]] = float(
cell["value.sum"]
)
return total_budgets
def get_national_expenditure_treemap(self, financial_year_id, budget_phase):
""" Returns a data object for each department, year and phase. Adds additional data required for the Treemap.
From the Expenditure Time Series dataset. """
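        # Illustrative call, assuming ``department`` is a Department instance
        # (the year and phase values are assumptions):
        #   department.get_national_expenditure_treemap("2019-20", "original")
        # returns {"data": {"items": [...], "total": ...}} or None.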
# Take budget sphere, year and phase as positional arguments from URL
# Output expenditure specific to sphere:year:phase scope, simple list of objects
try:
selected_phase = EXPENDITURE_TIME_SERIES_PHASE_MAPPING[budget_phase]
except KeyError:
raise Exception("An invalid phase was provided: {}".format(budget_phase))
expenditure = []
dataset = get_expenditure_time_series_dataset(self.government.sphere.slug)
if not dataset:
return None
openspending_api = dataset.get_openspending_api()
phase_ref = openspending_api.get_phase_ref()
year_ref = openspending_api.get_financial_year_ref()
# Add cuts: year and phase
expenditure_cuts = [
openspending_api.get_adjustment_kind_ref() + ":" + '"Total"',
year_ref
+ ":"
+ "{}".format(FinancialYear.start_from_year_slug(financial_year_id)),
phase_ref + ":" + '"{}"'.format(selected_phase),
]
expenditure_drilldowns = [
year_ref,
phase_ref,
openspending_api.get_department_name_ref(),
openspending_api.get_programme_name_ref(),
]
expenditure_results = openspending_api.aggregate(
cuts=expenditure_cuts, drilldowns=expenditure_drilldowns
)
# Disaggregate and filter out any direct charge NRF programmes
filtered_cells = openspending_api.filter_by_ref_exclusion(
expenditure_results["cells"],
openspending_api.get_programme_name_ref(),
DIRECT_CHARGE_NRF,
)
# Re-aggregate by year:phase
result_cells = openspending_api.aggregate_by_refs(
[openspending_api.get_department_name_ref(), year_ref, phase_ref],
filtered_cells,
)
total_budget = 0
filtered_result_cells = []
national_depts = Department.objects.filter(
government__sphere__slug="national", is_vote_primary=True
)
dept = None
for cell in result_cells:
try:
if year_ref in cell:
dept = national_depts.get(
government__sphere__financial_year__slug=FinancialYear.slug_from_year_start(
str(cell[year_ref])
),
slug=slugify(cell[openspending_api.get_department_name_ref()]),
)
except Department.DoesNotExist:
logger.warning(
"Excluding: national {} {}".format(
cell[year_ref], cell[openspending_api.get_department_name_ref()]
)
)
continue
total_budget += float(cell["value.sum"])
cell["url"] = dept.get_preview_url_path() if dept else None
filtered_result_cells.append(cell)
for cell in filtered_result_cells:
percentage_of_total = (
float(cell["value.sum"]) / total_budget * 100
if cell["value.sum"]
else 0
)
name = (
cell[openspending_api.get_department_name_ref()]
if openspending_api.get_department_name_ref() in cell
else ""
)
expenditure.append(
{
"name": name,
"slug": slugify(name),
"amount": float(cell["value.sum"]),
"percentage_of_total": percentage_of_total,
"province": None, # to keep a consistent schema
"url": cell["url"],
}
)
return (
{"data": {"items": expenditure, "total": total_budget}}
if expenditure
else None
)
def get_provincial_expenditure_treemap(self, financial_year_id, budget_phase):
""" Returns a list of department objects nested in provinces. """
# Take budget sphere, year and phase as positional arguments from URL
# Output expenditure specific to sphere:year:phase scope, simple list of objects
try:
selected_phase = EXPENDITURE_TIME_SERIES_PHASE_MAPPING[budget_phase]
except KeyError:
raise Exception("An invalid phase was provided: {}".format(budget_phase))
expenditure = []
dataset = get_expenditure_time_series_dataset(self.government.sphere.slug)
if not dataset:
return None
openspending_api = dataset.get_openspending_api()
# Add cuts: year and phase
expenditure_cuts = [
openspending_api.get_adjustment_kind_ref() + ":" + '"Total"',
openspending_api.get_financial_year_ref()
+ ":"
+ "{}".format(FinancialYear.start_from_year_slug(financial_year_id)),
openspending_api.get_phase_ref() + ":" + '"{}"'.format(selected_phase),
]
expenditure_drilldowns = [
openspending_api.get_department_name_ref(),
openspending_api.get_geo_ref(),
]
expenditure_results = openspending_api.aggregate(
cuts=expenditure_cuts, drilldowns=expenditure_drilldowns
)
total_budget = 0
filtered_result_cells = []
provincial_depts = Department.objects.filter(
government__sphere__financial_year__slug=financial_year_id,
government__sphere__slug="provincial",
is_vote_primary=True,
)
for cell in expenditure_results["cells"]:
try:
dept = provincial_depts.get(
slug=slugify(cell[openspending_api.get_department_name_ref()]),
government__slug=slugify(cell[openspending_api.get_geo_ref()]),
)
except Department.DoesNotExist:
logger.warning(
"Excluding: provincial {} {} {}".format(
financial_year_id,
cell[openspending_api.get_geo_ref()],
cell[openspending_api.get_department_name_ref()],
)
)
continue
total_budget += float(cell["value.sum"])
cell["url"] = dept.get_preview_url_path() if dept else None
filtered_result_cells.append(cell)
for cell in filtered_result_cells:
expenditure.append(
{
"name": cell[openspending_api.get_department_name_ref()],
"slug": slugify(cell[openspending_api.get_department_name_ref()]),
"amount": float(cell["value.sum"]),
"province": cell[openspending_api.get_geo_ref()],
"url": cell["url"],
}
)
return (
{"data": {"items": expenditure, "total": total_budget}}
if expenditure
else None
)
def get_expenditure_time_series_summary(self):
cpi_year_slug, cpi_resource_id = Dataset.get_latest_cpi_resource()
base_year = get_base_year(cpi_year_slug)
financial_year_start = self.get_financial_year().get_starting_year()
financial_year_start_int = int(financial_year_start)
financial_year_starts = [
str(y)
for y in range(financial_year_start_int - 3, financial_year_start_int + 1)
]
expenditure = {"nominal": [], "real": []}
dataset = get_expenditure_time_series_dataset(self.government.sphere.slug)
if not dataset:
return None
openspending_api = dataset.get_openspending_api()
cuts = [
openspending_api.get_adjustment_kind_ref() + ":" + '"Total"',
openspending_api.get_geo_ref() + ":" + '"%s"' % self.government.name,
]
drilldowns = [
openspending_api.get_financial_year_ref(),
openspending_api.get_phase_ref(),
openspending_api.get_department_name_ref(),
openspending_api.get_programme_name_ref(),
]
budget_results = openspending_api.aggregate(cuts=cuts, drilldowns=drilldowns)
result = openspending_api.filter_dept(budget_results, self.name)
filtered_cells = openspending_api.filter_by_ref_exclusion(
result["cells"],
openspending_api.get_programme_name_ref(),
DIRECT_CHARGE_NRF,
)
result_cells = openspending_api.aggregate_by_refs(
[
openspending_api.get_department_name_ref(),
openspending_api.get_financial_year_ref(),
openspending_api.get_phase_ref(),
],
filtered_cells,
)
if result_cells:
cpi = get_cpi()
for financial_year_start in financial_year_starts:
for phase in EXPENDITURE_TIME_SERIES_PHASES:
cells = [
c
for c in result_cells
if c[openspending_api.get_financial_year_ref()]
== int(financial_year_start)
and c[openspending_api.get_phase_ref()] == phase
]
if cells:
cell = cells[0]
nominal = cell["value.sum"]
expenditure["nominal"].append(
{
"financial_year": FinancialYear.slug_from_year_start(
financial_year_start
),
"amount": nominal,
"phase": phase,
}
)
expenditure["real"].append(
{
"financial_year": FinancialYear.slug_from_year_start(
financial_year_start
),
"amount": int(
(
Decimal(nominal)
/ cpi[financial_year_start]["index"]
)
* 100
),
"phase": phase,
}
)
missing_phases_count = {}
found = False
for fiscal_year in financial_year_starts:
for fiscal_phase in EXPENDITURE_TIME_SERIES_PHASES:
for fiscal_type in expenditure:
for item in expenditure[fiscal_type]:
found = False
if (
item["financial_year"]
== FinancialYear.slug_from_year_start(fiscal_year)
and item["phase"] == fiscal_phase
):
found = True
break
if not found:
expenditure[fiscal_type].append(
{
"financial_year": FinancialYear.slug_from_year_start(
fiscal_year
),
"phase": fiscal_phase,
"amount": None,
}
)
if fiscal_year not in missing_phases_count:
missing_phases_count[fiscal_year] = 1
else:
missing_phases_count[fiscal_year] += 1
expenditure["base_financial_year"] = FinancialYear.slug_from_year_start(
str(base_year)
)
# Generate notices if applicable
no_data_for_years = []
no_dept_for_years = []
notices = []
for year, count in missing_phases_count.items():
# 8 because 4 phases real and nominal
if count != 8:
# All phases for a given year must be missing before starting any checks
continue
single_year_cuts = [
openspending_api.get_adjustment_kind_ref() + ":" + '"Total"',
openspending_api.get_financial_year_ref() + ":" + year,
]
single_year_budget_results = openspending_api.aggregate(
cuts=single_year_cuts
)
if single_year_budget_results["cells"]:
value_sum = single_year_budget_results["cells"][0]["value.sum"]
if value_sum is not None and value_sum > 0:
# dept did not exist, since there is data for other departments
no_dept_for_years.append(year)
else:
# no data for this fiscal year, so data hasn't been published yet
no_data_for_years.append(year)
else:
# no data for this fiscal year, so data hasn't been published yet
no_data_for_years.append(year)
if no_data_for_years:
notice_string = "Please note that the data for"
no_data_for_years.sort()
if len(no_data_for_years) == 1:
notice_string += " {}".format(no_data_for_years[0])
elif len(no_data_for_years) == 2:
notice_string += " {}".format(" and ".join(no_data_for_years))
else:
notice_string += " {}".format(", ".join(no_data_for_years[:-1]))
notice_string += " and {}".format(no_data_for_years[-1])
notice_string += " has not been published on vulekamali."
notices.append(notice_string)
if no_dept_for_years:
notices.append(
"This department did not exist for some years displayed."
)
return {
"notices": notices,
"expenditure": expenditure,
"dataset_detail_page": dataset.get_url_path(),
}
else:
logger.warning(
"Missing expenditure time series data for %r budget year %s",
cuts,
self.get_financial_year().slug,
)
return None
def get_expenditure_time_series_by_programme(self):
financial_year_start = self.get_financial_year().get_starting_year()
financial_year_start_int = int(financial_year_start)
year_ints = range(financial_year_start_int - 3, financial_year_start_int + 1)
financial_year_starts = [str(y) for y in year_ints]
programmes = {}
dataset = get_expenditure_time_series_dataset(self.government.sphere.slug)
if not dataset:
return None
openspending_api = dataset.get_openspending_api()
cuts = [
openspending_api.get_adjustment_kind_ref() + ":" + '"Total"',
openspending_api.get_geo_ref() + ":" + '"%s"' % self.government.name,
]
drilldowns = [
openspending_api.get_financial_year_ref(),
openspending_api.get_phase_ref(),
openspending_api.get_department_name_ref(),
openspending_api.get_programme_name_ref(),
]
budget_results = openspending_api.aggregate(cuts=cuts, drilldowns=drilldowns)
result = openspending_api.filter_dept(budget_results, self.name)
if result["cells"]:
prog_names = [
cell[openspending_api.get_programme_name_ref()]
for cell in result["cells"]
]
prog_names = set(prog_names)
for financial_year_start in financial_year_starts:
for phase in EXPENDITURE_TIME_SERIES_PHASES:
for prog_name in prog_names:
cells = [
c
for c in result["cells"]
if c[openspending_api.get_financial_year_ref()]
== int(financial_year_start)
and c[openspending_api.get_phase_ref()] == phase
and c[openspending_api.get_programme_name_ref()]
== prog_name
]
if cells:
cell = cells[0]
nominal = cell["value.sum"]
try:
programmes[prog_name]
except KeyError:
programmes[prog_name] = {"name": prog_name, "items": []}
programmes[prog_name]["items"].append(
{
"financial_year": FinancialYear.slug_from_year_start(
financial_year_start
),
"amount": nominal,
"phase": phase,
}
)
found = False
missing_phases_count = {}
for fiscal_year in financial_year_starts:
for fiscal_phase in EXPENDITURE_TIME_SERIES_PHASES:
for program in programmes:
for item in programmes[program]["items"]:
found = False
if (
item["financial_year"]
== FinancialYear.slug_from_year_start(fiscal_year)
and item["phase"] == fiscal_phase
):
found = True
break
if not found:
programmes[program]["items"].append(
{
"financial_year": FinancialYear.slug_from_year_start(
fiscal_year
),
"phase": fiscal_phase,
"amount": None,
}
)
if fiscal_year not in missing_phases_count:
missing_phases_count[fiscal_year] = {program: 1}
else:
if (
program
not in missing_phases_count[fiscal_year].keys()
):
missing_phases_count[fiscal_year][program] = 1
else:
missing_phases_count[fiscal_year][program] += 1
no_prog_for_years = False
notices = []
for year, progs in missing_phases_count.items():
if no_prog_for_years:
break
for p, count in progs.items():
if no_prog_for_years:
break
if count == 4:
single_year_cuts = [
openspending_api.get_adjustment_kind_ref()
+ ":"
+ '"Total"',
openspending_api.get_financial_year_ref() + ":" + year,
]
single_year_budget_results = openspending_api.aggregate(
cuts=single_year_cuts
)
if single_year_budget_results["cells"]:
value_sum = single_year_budget_results["cells"][0][
"value.sum"
]
if value_sum is not None and value_sum > 0:
# prog did not exist, since there is data for other programmes
no_prog_for_years = True
if no_prog_for_years:
notices.append(
"One or more programmes did not exist for some years displayed."
)
return {
"notices": notices,
"programmes": programmes.values(), # FIXME need to add sorting with python3
"dataset_detail_page": dataset.get_url_path(),
}
else:
logger.warning(
"Missing expenditure time series data for %r budget year %s",
cuts,
self.get_financial_year().slug,
)
return None
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.get_url_path())
class GovtFunction(models.Model):
name = models.CharField(max_length=200, unique=True)
slug = AutoSlugField(
populate_from="name", max_length=200, always_update=True, unique=True
)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.slug)
class Programme(models.Model):
organisational_unit = "programme"
department = models.ForeignKey(
Department, on_delete=models.CASCADE, related_name="programmes"
)
name = models.CharField(max_length=200)
slug = AutoSlugField(populate_from="name", max_length=200, always_update=True)
programme_number = models.IntegerField()
govt_functions = models.ManyToManyField(GovtFunction)
class Meta:
unique_together = (
("department", "slug"),
("department", "name"),
("department", "programme_number"),
)
ordering = ["programme_number"]
def get_url_path(self):
return "%s/programmes/%s" % (self.department.get_url_path(), self.slug)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.get_url_path())
class InfrastructureProjectPart(models.Model):
administration_type = models.CharField(max_length=255)
government_institution = models.CharField(max_length=255)
sector = models.CharField(max_length=255)
project_name = models.CharField(max_length=255)
project_description = models.TextField()
nature_of_investment = models.CharField(max_length=255)
infrastructure_type = models.CharField(max_length=255)
current_project_stage = models.CharField(max_length=255)
sip_category = models.CharField(max_length=255)
br_featured = models.CharField(max_length=255)
featured = models.BooleanField()
budget_phase = models.CharField(max_length=255)
project_slug = models.CharField(max_length=255)
amount_rands = models.BigIntegerField(blank=True, null=True, default=None)
financial_year = models.CharField(max_length=4)
project_value_rands = models.BigIntegerField(default=0)
provinces = models.CharField(max_length=510, default="")
gps_code = models.CharField(max_length=255, default="")
# PPP fields
partnership_type = models.CharField(max_length=255, null=True, blank=True)
date_of_close = models.CharField(max_length=255, null=True, blank=True)
duration = models.CharField(max_length=255, null=True, blank=True)
financing_structure = models.CharField(max_length=255, null=True, blank=True)
project_value_rand_million = models.CharField(max_length=255, null=True, blank=True)
form_of_payment = models.CharField(max_length=255, null=True, blank=True)
class Meta:
verbose_name = "National infrastructure project part"
def __str__(self):
return "{} ({} {})".format(
self.project_slug, self.budget_phase, self.financial_year
)
def get_url_path(self):
return "/infrastructure-projects/{}".format(self.project_slug)
def get_absolute_url(self):
return reverse("infrastructure-projects", args=[self.project_slug])
def calculate_projected_expenditure(self):
""" Calculate sum of predicted amounts from a list of records """
projected_records_for_project = InfrastructureProjectPart.objects.filter(
budget_phase="MTEF", project_slug=self.project_slug
)
projected_expenditure = 0
for project in projected_records_for_project:
projected_expenditure += float(project.amount_rands or 0.0)
return projected_expenditure
@staticmethod
def _parse_coordinate(coordinate):
""" Expects a single set of coordinates (lat, long) split by a comma """
if not isinstance(coordinate, str):
raise TypeError("Invalid type for coordinate parsing")
lat_long = [float(x) for x in coordinate.split(",")]
cleaned_coordinate = {"latitude": lat_long[0], "longitude": lat_long[1]}
return cleaned_coordinate
@classmethod
def clean_coordinates(cls, raw_coordinate_string):
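        # Illustrative inputs (the values are arbitrary):
        #   "-26.2,28.0"                 -> one coordinate dict
        #   "-26.2,28.0 and -33.9,18.4"  -> two coordinate dicts
        #   anything else                -> warning logged, empty list returned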
cleaned_coordinates = []
try:
if "and" in raw_coordinate_string:
list_of_coordinates = raw_coordinate_string.split("and")
for coordinate in list_of_coordinates:
cleaned_coordinates.append(cls._parse_coordinate(coordinate))
elif "," in raw_coordinate_string:
cleaned_coordinates.append(cls._parse_coordinate(raw_coordinate_string))
else:
logger.warning("Invalid co-ordinates: {}".format(raw_coordinate_string))
except Exception as e:
logger.warning(
"Caught Exception '{}' for co-ordinates {}".format(
e, raw_coordinate_string
)
)
return cleaned_coordinates
@staticmethod
def _get_province_from_coord(coordinate):
""" Expects a cleaned coordinate """
key = f"coordinate province {coordinate['latitude']}, {coordinate['longitude']}"
province_name = cache.get(key, default="cache-miss")
if province_name == "cache-miss":
logger.info(f"Coordinate Province Cache MISS for coordinate {key}")
params = {"type": "PR"}
province_result = requests.get(
MAPIT_POINT_API_URL.format(
coordinate["longitude"], coordinate["latitude"]
),
params=params,
)
province_result.raise_for_status()
r = province_result.json()
list_of_objects_returned = list(r.values())
if len(list_of_objects_returned) > 0:
province_name = list_of_objects_returned[0]["name"]
else:
province_name = None
cache.set(key, province_name)
else:
logger.info(f"Coordinate Province Cache HIT for coordinate {key}")
return province_name
@staticmethod
def _get_province_from_project_name(project_name):
""" Searches name of project for province name or abbreviation """
project_name_slug = slugify(project_name)
new_dict = {}
for prov_name in prov_abbrev.keys():
new_dict[prov_name] = slugify(prov_name)
for name, slug in new_dict.items():
if slug in project_name_slug:
return name
return None
@classmethod
def get_provinces(cls, cleaned_coordinates=None, project_name=""):
""" Returns a list of provinces based on values in self.coordinates """
provinces = set()
if cleaned_coordinates:
for c in cleaned_coordinates:
province = cls._get_province_from_coord(c)
if province:
provinces.add(province)
else:
logger.warning(
"Couldn't find GPS co-ordinates for infrastructure project '{}' on MapIt: {}".format(
project_name, c
)
)
else:
province = cls._get_province_from_project_name(project_name)
if province:
logger.info("Found province {} in project name".format(province))
provinces.add(province)
return list(provinces)
@staticmethod
def _build_expenditure_item(project):
return {
"year": project.financial_year,
"amount": project.amount_rands,
"budget_phase": project.budget_phase,
}
def build_complete_expenditure(self):
complete_expenditure = []
projects = InfrastructureProjectPart.objects.filter(
project_slug=self.project_slug
)
for project in projects:
complete_expenditure.append(self._build_expenditure_item(project))
return complete_expenditure
prov_keys = prov_abbrev.keys()
prov_choices = tuple([(prov_key, prov_key) for prov_key in prov_keys])
class Event(models.Model):
start_date = models.DateField(default=datetime.now)
date = models.CharField(max_length=255)
type = models.CharField(
max_length=255,
choices=(
("hackathon", "hackathon"),
("dataquest", "dataquest"),
("cid", "cid"),
("gift-dataquest", "gift-dataquest"),
),
)
province = models.CharField(max_length=255, choices=prov_choices)
where = models.CharField(max_length=255)
url = models.URLField(blank=True, null=True)
notes_url = models.URLField(blank=True, null=True)
video_url = models.URLField(blank=True, null=True)
rsvp_url = models.URLField(blank=True, null=True)
presentation_url = models.URLField(blank=True, null=True)
status = models.CharField(
max_length=255,
default="upcoming",
choices=(("upcoming", "upcoming"), ("past", "past")),
)
class Meta:
ordering = ["-start_date"]
def __str__(self):
return "{} {} ({} {})".format(self.type, self.date, self.where, self.province)
def get_absolute_url(self):
return reverse("events")
class VideoLanguage(SortableMixin):
label = models.CharField(max_length=255)
youtube_id = models.CharField(max_length=255, null=True, blank=True)
video = models.ForeignKey("Video", null=True, blank=True, on_delete=models.SET_NULL)
video_language_order = models.PositiveIntegerField(
default=0, editable=False, db_index=True
)
class Meta:
ordering = ["video_language_order"]
def __str__(self):
return self.label
class Video(SortableMixin):
title_id = models.CharField(max_length=255)
title = models.CharField(max_length=255)
description = models.TextField(max_length=510)
video_order = models.PositiveIntegerField(default=0, editable=False, db_index=True)
class Meta:
ordering = ["video_order"]
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("videos")
class FAQ(SortableMixin):
title = models.CharField(max_length=1024)
content = RichTextField()
the_order = models.PositiveIntegerField(default=0, editable=False, db_index=True)
def __str__(self):
return self.title
class Meta:
verbose_name = "FAQ"
verbose_name_plural = "FAQs"
ordering = ["the_order"]
class Quarter(models.Model):
number = models.IntegerField(unique=True)
class Meta:
ordering = ["number"]
def __str__(self):
return "Quarter %d" % self.number
def irm_snapshot_file_path(instance, filename):
extension = filename.split(".")[-1]
return (
f"irm-snapshots/{uuid.uuid4()}/"
f"{instance.sphere.financial_year.slug}-Q{instance.quarter.number}-"
f"{instance.sphere.slug}-taken-{instance.date_taken.isoformat()[:18]}.{extension}"
)
class IRMSnapshot(models.Model):
"""
This represents a particular snapshot from the Infrastructure Reporting Model
(IRM) database
"""
sphere = models.ForeignKey(Sphere, on_delete=models.CASCADE)
quarter = models.ForeignKey(Quarter, on_delete=models.CASCADE)
date_taken = models.DateTimeField()
file = models.FileField(upload_to=irm_snapshot_file_path)
created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
class Meta:
ordering = ["sphere__financial_year__slug", "quarter__number"]
verbose_name = "IRM Snapshot"
unique_together = ["sphere", "quarter"]
def __str__(self):
return (
f"{self.sphere.name} "
f"{self.sphere.financial_year.slug} Q{self.quarter.number} "
f"taken {self.date_taken.isoformat()[:18]}"
)
class InfraProject(models.Model):
"""
This represents a project, grouping its snapshots by project's ID in IRM.
This assumes the same project will have the same ID in IRM across financial years.
The internal ID of these instances is used as the ID in the URL, since we don't
want to expose IRM IDs publicly but we want to have a consistent URL for projects.
We don't delete these even when there are no snapshots associated with them
so that the URL based on this id remains consistent in case projects with this
IRM ID are uploaded after snapshots are deleted.
"""
IRM_project_id = models.IntegerField()
sphere_slug = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
class Meta:
verbose_name = "Infrastructure project"
unique_together = ["sphere_slug", "IRM_project_id"]
def __str__(self):
if self.project_snapshots.count():
return self.project_snapshots.latest().name
else:
return f"{self.sphere_slug} IRM project ID {self.IRM_project_id} (no snapshots loaded)"
def get_slug(self):
return slugify(
"{0} {1}".format(
self.project_snapshots.latest().name,
self.project_snapshots.latest().province,
)
)
def get_absolute_url(self):
args = [self.pk, self.get_slug()]
return reverse("infra-project-detail", args=args)
@property
def csv_download_url(self):
return reverse(
"infra-project-detail-csv-download", args=(self.id, self.get_slug()),
)
class InfraProjectSnapshot(models.Model):
"""This represents a snapshot of a project, as it occurred in an IRM snapshot"""
irm_snapshot = models.ForeignKey(
IRMSnapshot, on_delete=models.CASCADE, related_name="project_snapshots"
)
project = models.ForeignKey(
InfraProject, on_delete=models.CASCADE, related_name="project_snapshots"
)
project_number = models.CharField(max_length=1024, blank=True, null=True)
name = models.CharField(max_length=1024, blank=True, null=True)
department = models.CharField(max_length=1024, blank=True, null=True)
sector = models.CharField(max_length=1024, blank=True, null=True)
province = models.CharField(max_length=1024, blank=True, null=True)
local_municipality = models.CharField(max_length=1024, blank=True, null=True)
district_municipality = models.CharField(max_length=1024, blank=True, null=True)
latitude = models.CharField(max_length=20, blank=True, null=True)
longitude = models.CharField(max_length=20, blank=True, null=True)
status = models.CharField(max_length=1024, blank=True, null=True)
budget_programme = models.CharField(max_length=1024, blank=True, null=True)
primary_funding_source = models.CharField(max_length=1024, blank=True, null=True)
nature_of_investment = models.CharField(max_length=1024, blank=True, null=True)
funding_status = models.CharField(max_length=1024, blank=True, null=True)
program_implementing_agent = models.CharField(
max_length=1024, blank=True, null=True
)
principle_agent = models.CharField(max_length=1024, blank=True, null=True)
main_contractor = models.CharField(max_length=1024, blank=True, null=True)
other_parties = models.TextField(blank=True, null=True)
# Dates
start_date = models.DateField(blank=True, null=True)
estimated_construction_start_date = models.DateField(blank=True, null=True)
estimated_completion_date = models.DateField(blank=True, null=True)
contracted_construction_end_date = models.DateField(blank=True, null=True)
estimated_construction_end_date = models.DateField(blank=True, null=True)
# Budgets and spending
total_professional_fees = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
total_construction_costs = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
variation_orders = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
estimated_total_project_cost = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
expenditure_from_previous_years_professional_fees = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
expenditure_from_previous_years_construction_costs = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
expenditure_from_previous_years_total = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
project_expenditure_total = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
main_appropriation_professional_fees = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
adjusted_appropriation_professional_fees = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
main_appropriation_construction_costs = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
adjusted_appropriation_construction_costs = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
main_appropriation_total = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
adjusted_appropriation_total = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
actual_expenditure_q1 = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
actual_expenditure_q2 = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
actual_expenditure_q3 = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
actual_expenditure_q4 = models.DecimalField(
max_digits=20, decimal_places=2, blank=True, null=True
)
# Metadata
created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
class Meta:
ordering = [
"irm_snapshot__sphere__financial_year__slug",
"irm_snapshot__quarter__number",
]
get_latest_by = "irm_snapshot"
verbose_name = "Infrastructure project snapshot"
unique_together = ["irm_snapshot", "project"]
@property
def government(self):
if self.irm_snapshot.sphere.slug == "national":
return "South Africa"
elif self.irm_snapshot.sphere.slug == "provincial":
return self.province
else:
raise Exception(f"Unexpected sphere {self.irm_snapshot.sphere}")
@property
def government_label(self):
if self.irm_snapshot.sphere.slug == "national":
return "National"
elif self.irm_snapshot.sphere.slug == "provincial":
return self.province
else:
raise Exception(f"Unexpected sphere {self.irm_snapshot.sphere}")
def __str__(self):
return self.name
# https://stackoverflow.com/questions/35633037/search-for-document-in-solr-where-a-multivalue-field-is-either-empty
# -or-has-a-sp
def none_selected_query(vocab_name):
"""Match items where none of the options in a custom vocab tag is selected"""
return '+(*:* NOT %s:["" TO *])' % vocab_name
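# Illustrative example ("vocab_provinces" is a hypothetical field name):
#   none_selected_query("vocab_provinces")
#   -> '+(*:* NOT vocab_provinces:["" TO *])'
# i.e. it matches documents where none of the options of that field is set.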
def extras_set(extras, key, value):
for extra in extras:
if extra["key"] == key:
extra["value"] = value
break
def resource_name(department):
return "Vote %d - %s" % (department.vote_number, department.name)
def get_base_year(cpi_year_slug):
return int(cpi_year_slug[:4]) - 1
def get_cpi():
cpi_year_slug, cpi_resource_id = Dataset.get_latest_cpi_resource()
base_year = get_base_year(cpi_year_slug)
sql = """
SELECT "Year", "CPI" FROM "{}"
ORDER BY "Year"
""".format(
cpi_resource_id
)
params = {"sql": sql}
result = requests.get(CKAN_DATASTORE_URL, params=params)
result.raise_for_status()
cpi = result.json()["result"]["records"]
base_year_index = None
for idx, cell in enumerate(cpi):
financial_year_start = cell["Year"][:4]
cell["financial_year_start"] = financial_year_start
if financial_year_start == str(base_year):
base_year_index = idx
cell["index"] = 100
for idx in range(base_year_index - 1, -1, -1):
cpi[idx]["index"] = cpi[idx + 1]["index"] / (1 + Decimal(cpi[idx + 1]["CPI"]))
for idx in range(base_year_index + 1, len(cpi)):
cpi[idx]["index"] = cpi[idx - 1]["index"] * (1 + Decimal(cpi[idx]["CPI"]))
cpi_dict = {}
for cell in cpi:
cpi_dict[cell["financial_year_start"]] = cell
return cpi_dict
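# Worked example of the index chaining above (illustrative CPI values):
# with the base year 2016 pinned at index 100, a 2017 CPI of 0.053 gives
#   2017 index = 100 * (1 + 0.053) = 105.3
# and, walking backwards, a 2016 CPI of 0.064 gives
#   2015 index = 100 / (1 + 0.064) ≈ 93.98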
def get_vocab_map():
vocab_map = {}
for vocab in ckan.action.vocabulary_list():
vocab_map[vocab["name"]] = vocab["id"]
return vocab_map
def csv_url(aggregate_url):
querystring = "?api_url=" + quote(aggregate_url)
csv_url = reverse("openspending_csv") + querystring
if len(csv_url) > URL_LENGTH_LIMIT:
raise Exception(
"Generated URL exceeds max length of %s. "
"Some browsers may no longer be able to interpret the URL."
% URL_LENGTH_LIMIT
)
return csv_url
class WagtailHomePage(Page):
parent_page_types = []
subpage_types = ["budgetportal.LearningIndexPage", "budgetportal.PostIndexPage"]
class Meta:
app_label = "budgetportal"
class LearningIndexPage(Page):
subpage_types = ["budgetportal.GuideIndexPage"]
max_count = 1
class PostIndexPage(Page):
subpage_types = ["budgetportal.PostPage"]
max_count = 1
intro = RichTextField(blank=True)
content_panels = Page.content_panels + [FieldPanel("intro", classname="full")]
class GuideIndexPage(Page):
max_count = 1
parent_page_types = ["budgetportal.LearningIndexPage"]
subpage_types = ["budgetportal.GuidePage"]
intro = RichTextField(blank=True)
content_panels = Page.content_panels + [FieldPanel("intro", classname="full")]
def get_context(self, request, *args, **kwargs):
context = super().get_context(request, *args, **kwargs)
guides_ordering = OrderedDict([(p.title, p) for p in self.get_children()])
for external in CategoryGuide.objects.filter(external_url__isnull=False):
guides_ordering[external.external_url_title] = external
context["guides"] = guides_ordering.values()
return context
class GuidePage(Page):
parent_page_types = ["budgetportal.GuideIndexPage"]
body = StreamField(
[("section", SectionBlock()), ("html", wagtail_blocks.RawHTMLBlock()),]
)
created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
image = models.ForeignKey(
"wagtailimages.Image",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
)
content_panels = Page.content_panels + [
StreamFieldPanel("body"),
ImageChooserPanel("image"),
]
class CategoryGuide(models.Model):
"""Link GuidePages or external URLs to dataset category slugs"""
category_slug = models.SlugField(max_length=200, unique=True)
guide_page = models.ForeignKey(
GuidePage, null=True, blank=True, on_delete=models.CASCADE
)
external_url = models.URLField(null=True, blank=True)
external_url_title = models.CharField(
max_length=200,
null=True,
blank=True,
help_text=(
"Only shown if External URL is used. This may be truncated so "
"use a short description that will also be seen on the external page."
),
)
external_url_description = models.TextField(
null=True,
blank=True,
help_text=(
"Only shown if External URL is used. This may be truncated so "
"use a short description that will also be seen on the external page."
),
)
def __str__(self):
return "{} - {}".format(
self.category_slug, self.guide_page or self.external_url
)
def clean(self):
if self.external_url is None and self.guide_page is None:
raise ValidationError("Either Guide Page or External URL must be set.")
if self.external_url is not None and self.guide_page is not None:
raise ValidationError("Only one of Guide Page or External URL may be set.")
if self.external_url is not None and self.external_url_title is None:
raise ValidationError("Title is required when using External URL.")
return super().clean()
class NavContextMixin:
def get_context(self, request):
context = super().get_context(request)
nav = nav_bar.get_items(FinancialYear.get_latest_year().slug)
context["navbar"] = nav
for item in nav:
if item["url"] and request.path.startswith(item["url"]):
context["selected_tab"] = item["id"]
return context
class PostPage(Page):
parent_page_types = ["budgetportal.PostIndexPage"]
body = StreamField(
[("section", SectionBlock()), ("html", wagtail_blocks.RawHTMLBlock()),]
)
created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
image = models.ForeignKey(
"wagtailimages.Image",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
)
content_panels = Page.content_panels + [
StreamFieldPanel("body"),
]
| 39.073992 | 117 | 0.591133 |
b62a527fdc22bbb824e68862136b20c89f56baf0 | 19,098 | py | Python | tap_snowflake/__init__.py | lab49/pipelinewise-tap-snowflake | fad7264054e7b4b094162124db695115e9f96091 | ["Apache-2.0"] | null | null | null | tap_snowflake/__init__.py | lab49/pipelinewise-tap-snowflake | fad7264054e7b4b094162124db695115e9f96091 | ["Apache-2.0"] | null | null | null | tap_snowflake/__init__.py | lab49/pipelinewise-tap-snowflake | fad7264054e7b4b094162124db695115e9f96091 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# pylint: disable=missing-docstring,not-an-iterable,too-many-locals,too-many-arguments,too-many-branches,invalid-name,duplicate-code,too-many-statements
import collections
import copy
import itertools
import re
import sys
import logging
import singer
import singer.metrics as metrics
import singer.schema
import snowflake.connector
from singer import metadata
from singer import utils
from singer.catalog import Catalog, CatalogEntry
from singer.schema import Schema
import tap_snowflake.sync_strategies.common as common
import tap_snowflake.sync_strategies.full_table as full_table
import tap_snowflake.sync_strategies.incremental as incremental
from tap_snowflake.connection import SnowflakeConnection
LOGGER = singer.get_logger('tap_snowflake')
# Max number of rows that a SHOW SCHEMAS|TABLES|COLUMNS can return.
# If more than this number of rows returned then tap-snowflake will raise TooManyRecordsException
SHOW_COMMAND_MAX_ROWS = 9999
# Tone down snowflake connector logs noise
logging.getLogger('snowflake.connector').setLevel(logging.WARNING)
Column = collections.namedtuple('Column', [
'table_catalog',
'table_schema',
'table_name',
'column_name',
'data_type',
'character_maximum_length',
'numeric_precision',
'numeric_scale'])
REQUIRED_CONFIG_KEYS = [
'account',
'dbname',
'user',
'password',
'warehouse',
'tables'
]
# Snowflake data types
STRING_TYPES = set(['varchar', 'char', 'character', 'string', 'text'])
NUMBER_TYPES = set(['number', 'decimal', 'numeric'])
INTEGER_TYPES = set(['int', 'integer', 'bigint', 'smallint'])
FLOAT_TYPES = set(['float', 'float4', 'float8', 'real', 'double', 'double precision'])
DATETIME_TYPES = set(['datetime', 'timestamp', 'date', 'timestamp_ltz', 'timestamp_ntz', 'timestamp_tz'])
ARRAY_TYPE = set(['array'])
BINARY_TYPE = set(['binary', 'varbinary'])
def schema_for_column(c):
'''Returns the Schema object for the given Column.'''
data_type = c.data_type.lower()
inclusion = 'available'
result = Schema(inclusion=inclusion)
if data_type == 'boolean':
result.type = ['null', 'boolean']
elif data_type in INTEGER_TYPES:
result.type = ['null', 'number']
elif data_type in FLOAT_TYPES:
result.type = ['null', 'number']
elif data_type in NUMBER_TYPES:
result.type = ['null', 'number']
elif data_type in STRING_TYPES:
result.type = ['null', 'string']
result.maxLength = c.character_maximum_length
elif data_type in DATETIME_TYPES:
result.type = ['null', 'string']
result.format = 'date-time'
elif data_type == 'time':
result.type = ['null', 'string']
result.format = 'time'
elif data_type in BINARY_TYPE:
result.type = ['null', 'string']
result.format = 'binary'
elif data_type in ARRAY_TYPE:
result.type = ['null', 'string']
else:
result = Schema(None,
inclusion='unsupported',
description='Unsupported data type {}'.format(data_type))
return result
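# Illustrative mapping (not exhaustive): a VARCHAR(16) column becomes
# {'type': ['null', 'string'], 'maxLength': 16}, a TIMESTAMP_NTZ column becomes
# {'type': ['null', 'string'], 'format': 'date-time'}, and any type not listed
# above falls through to the 'unsupported' schema.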
def create_column_metadata(cols):
mdata = {}
mdata = metadata.write(mdata, (), 'selected-by-default', False)
for c in cols:
schema = schema_for_column(c)
mdata = metadata.write(mdata,
('properties', c.column_name),
'selected-by-default',
schema.inclusion != 'unsupported')
mdata = metadata.write(mdata,
('properties', c.column_name),
'sql-datatype',
c.data_type.lower())
return metadata.to_list(mdata)
def get_table_columns(snowflake_conn, tables):
"""Get column definitions of a list of tables
It uses SHOW commands instead of INFORMATION_SCHEMA views because INFORMATION_SCHEMA views are slow
and can raise the unexpected exception:
Information schema query returned too much data. Please repeat query with more selective predicates.
"""
table_columns = []
for table in tables:
queries = []
LOGGER.info('Getting column information for %s...', table)
# Get column data types by SHOW commands
show_columns = f'SHOW COLUMNS IN TABLE {table}'
# Convert output of SHOW commands to tables and use SQL joins to get every required information
select = """
WITH
show_columns AS (SELECT * FROM TABLE(RESULT_SCAN(%(LAST_QID)s)))
SELECT show_columns."database_name" AS table_catalog
,show_columns."schema_name" AS table_schema
,show_columns."table_name" AS table_name
,show_columns."column_name" AS column_name
-- ----------------------------------------------------------------------------------------
-- Character and numeric columns display their generic data type rather than their defined
-- data type (i.e. TEXT for all character types, FIXED for all fixed-point numeric types,
-- and REAL for all floating-point numeric types).
--
-- Further info at https://docs.snowflake.net/manuals/sql-reference/sql/show-columns.html
-- ----------------------------------------------------------------------------------------
,CASE PARSE_JSON(show_columns."data_type"):type::varchar
WHEN 'FIXED' THEN 'NUMBER'
WHEN 'REAL' THEN 'FLOAT'
ELSE PARSE_JSON("data_type"):type::varchar
END data_type
,PARSE_JSON(show_columns."data_type"):length::number AS character_maximum_length
,PARSE_JSON(show_columns."data_type"):precision::number AS numeric_precision
,PARSE_JSON(show_columns."data_type"):scale::number AS numeric_scale
FROM show_columns
"""
queries.extend([show_columns, select])
# Run everything in one transaction
columns = snowflake_conn.query(queries, max_records=SHOW_COMMAND_MAX_ROWS)
table_columns.extend(columns)
return table_columns
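# Flow sketch: for each table the tap first issues
#   SHOW COLUMNS IN TABLE <db>.<schema>.<table>
# and then, in the same session, post-processes that output with
#   SELECT ... FROM TABLE(RESULT_SCAN(%(LAST_QID)s))
# where the previous query id is presumably bound inside SnowflakeConnection.query.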
def discover_catalog(snowflake_conn, config):
"""Returns a Catalog describing the structure of the database."""
tables = config.get('tables').split(',')
sql_columns = get_table_columns(snowflake_conn, tables)
table_info = {}
columns = []
for sql_col in sql_columns:
catalog = sql_col['TABLE_CATALOG']
schema = sql_col['TABLE_SCHEMA']
table_name = sql_col['TABLE_NAME']
if catalog not in table_info:
table_info[catalog] = {}
if schema not in table_info[catalog]:
table_info[catalog][schema] = {}
table_info[catalog][schema][table_name] = {
'row_count': sql_col.get('ROW_COUNT'),
'is_view': sql_col.get('TABLE_TYPE') == 'VIEW'
}
columns.append(Column(
table_catalog=catalog,
table_schema=schema,
table_name=table_name,
column_name=sql_col['COLUMN_NAME'],
data_type=sql_col['DATA_TYPE'],
character_maximum_length=sql_col['CHARACTER_MAXIMUM_LENGTH'],
numeric_precision=sql_col['NUMERIC_PRECISION'],
numeric_scale=sql_col['NUMERIC_SCALE']
))
entries = []
for (k, cols) in itertools.groupby(columns, lambda c: (c.table_catalog, c.table_schema, c.table_name)):
cols = list(cols)
(table_catalog, table_schema, table_name) = k
schema = Schema(type='object',
properties={c.column_name: schema_for_column(c) for c in cols})
md = create_column_metadata(cols)
md_map = metadata.to_map(md)
md_map = metadata.write(md_map, (), 'database-name', table_catalog)
md_map = metadata.write(md_map, (), 'schema-name', table_schema)
if (
table_catalog in table_info and
table_schema in table_info[table_catalog] and
table_name in table_info[table_catalog][table_schema]
):
# Row count of views returns NULL - transform it to a non-null integer by defaulting to 0
row_count = table_info[table_catalog][table_schema][table_name].get('row_count', 0) or 0
is_view = table_info[table_catalog][table_schema][table_name]['is_view']
md_map = metadata.write(md_map, (), 'row-count', row_count)
md_map = metadata.write(md_map, (), 'is-view', is_view)
entry = CatalogEntry(
table=table_name,
stream=table_name,
metadata=metadata.to_list(md_map),
tap_stream_id=common.generate_tap_stream_id(table_catalog, table_schema, table_name),
schema=schema)
entries.append(entry)
return Catalog(entries)
def do_discover(snowflake_conn, config):
discover_catalog(snowflake_conn, config).dump()
# pylint: disable=fixme
# TODO: Maybe put in a singer-db-utils library.
def desired_columns(selected, table_schema):
"""Return the set of column names we need to include in the SELECT.
selected - set of column names marked as selected in the input catalog
table_schema - the most recently discovered Schema for the table
"""
all_columns = set()
available = set()
automatic = set()
unsupported = set()
for column, column_schema in table_schema.properties.items():
all_columns.add(column)
inclusion = column_schema.inclusion
if inclusion == 'automatic':
automatic.add(column)
elif inclusion == 'available':
available.add(column)
elif inclusion == 'unsupported':
unsupported.add(column)
else:
raise Exception('Unknown inclusion ' + inclusion)
selected_but_unsupported = selected.intersection(unsupported)
if selected_but_unsupported:
LOGGER.warning(
'Columns %s were selected but are not supported. Skipping them.',
selected_but_unsupported)
selected_but_nonexistent = selected.difference(all_columns)
if selected_but_nonexistent:
LOGGER.warning(
'Columns %s were selected but do not exist.',
selected_but_nonexistent)
not_selected_but_automatic = automatic.difference(selected)
if not_selected_but_automatic:
LOGGER.warning(
'Columns %s are primary keys but were not selected. Adding them.',
not_selected_but_automatic)
return selected.intersection(available).union(automatic)
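# Worked example (hypothetical column names): with
#   selected    = {'ID', 'NAME', 'LEGACY_BLOB'}
#   automatic   = {'ID'}            (primary key)
#   available   = {'NAME', 'UPDATED_AT'}
#   unsupported = {'LEGACY_BLOB'}
# a warning is logged for LEGACY_BLOB and the function returns {'ID', 'NAME'}.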
def resolve_catalog(discovered_catalog, streams_to_sync):
result = Catalog(streams=[])
# Iterate over the streams in the input catalog and match each one up
# with the same stream in the discovered catalog.
for catalog_entry in streams_to_sync:
catalog_metadata = metadata.to_map(catalog_entry.metadata)
replication_key = catalog_metadata.get((), {}).get('replication-key')
discovered_table = discovered_catalog.get_stream(catalog_entry.tap_stream_id)
database_name = common.get_database_name(catalog_entry)
if not discovered_table:
LOGGER.warning('Database %s table %s was selected but does not exist',
database_name, catalog_entry.table)
continue
selected = {k for k, v in catalog_entry.schema.properties.items()
if common.property_is_selected(catalog_entry, k) or k == replication_key}
# These are the columns we need to select
columns = desired_columns(selected, discovered_table.schema)
result.streams.append(CatalogEntry(
tap_stream_id=catalog_entry.tap_stream_id,
metadata=catalog_entry.metadata,
stream=catalog_entry.tap_stream_id,
table=catalog_entry.table,
schema=Schema(
type='object',
properties={col: discovered_table.schema.properties[col]
for col in columns}
)
))
return result
def get_streams(snowflake_conn, catalog, config, state):
"""Returns the Catalog of data we're going to sync for all SELECT-based
streams (i.e. INCREMENTAL and FULL_TABLE that require a historical
sync).
Using the Catalog provided from the input file, this function will return a
Catalog representing exactly which tables and columns will be emitted
by SELECT-based syncs. This is achieved by comparing the input Catalog to a
freshly discovered Catalog to determine the resulting Catalog.
The resulting Catalog will include any streams marked as
"selected" that currently exist in the database. Columns marked as "selected"
and those labeled "automatic" (e.g. primary keys and replication keys) will be
included. Streams will be prioritized in the following order:
1. currently_syncing if it is SELECT-based
2. any streams that do not have state
3. any streams that do not have a replication method of LOG_BASED
"""
discovered = discover_catalog(snowflake_conn, config)
# Filter catalog to include only selected streams
# pylint: disable=unnecessary-lambda
selected_streams = list(filter(lambda s: common.stream_is_selected(s), catalog.streams))
streams_with_state = []
streams_without_state = []
for stream in selected_streams:
stream_state = state.get('bookmarks', {}).get(stream.tap_stream_id)
if not stream_state:
streams_without_state.append(stream)
else:
streams_with_state.append(stream)
# If the state says we were in the middle of processing a stream, skip
# to that stream. Then process streams without prior state and finally
# move onto streams with state (i.e. have been synced in the past)
currently_syncing = singer.get_currently_syncing(state)
# prioritize streams that have not been processed
ordered_streams = streams_without_state + streams_with_state
if currently_syncing:
currently_syncing_stream = list(filter(
lambda s: s.tap_stream_id == currently_syncing, streams_with_state))
non_currently_syncing_streams = list(filter(lambda s: s.tap_stream_id != currently_syncing, ordered_streams))
streams_to_sync = currently_syncing_stream + non_currently_syncing_streams
else:
# prioritize streams that have not been processed
streams_to_sync = ordered_streams
return resolve_catalog(discovered, streams_to_sync)
def write_schema_message(catalog_entry, bookmark_properties=None):
key_properties = common.get_key_properties(catalog_entry)
singer.write_message(singer.SchemaMessage(
stream=catalog_entry.stream,
schema=catalog_entry.schema.to_dict(),
key_properties=key_properties,
bookmark_properties=bookmark_properties
))
def do_sync_incremental(snowflake_conn, catalog_entry, state, columns):
LOGGER.info('Stream %s is using incremental replication', catalog_entry.stream)
md_map = metadata.to_map(catalog_entry.metadata)
replication_key = md_map.get((), {}).get('replication-key')
if not replication_key:
raise Exception(f'Cannot use INCREMENTAL replication for table ({catalog_entry.stream}) without a replication '
f'key.')
write_schema_message(catalog_entry=catalog_entry,
bookmark_properties=[replication_key])
incremental.sync_table(snowflake_conn, catalog_entry, state, columns)
singer.write_message(singer.StateMessage(value=copy.deepcopy(state)))
def do_sync_full_table(snowflake_conn, catalog_entry, state, columns):
LOGGER.info('Stream %s is using full table replication', catalog_entry.stream)
write_schema_message(catalog_entry)
stream_version = common.get_stream_version(catalog_entry.tap_stream_id, state)
full_table.sync_table(snowflake_conn, catalog_entry, state, columns, stream_version)
# Prefer initial_full_table_complete going forward
singer.clear_bookmark(state, catalog_entry.tap_stream_id, 'version')
state = singer.write_bookmark(state,
catalog_entry.tap_stream_id,
'initial_full_table_complete',
True)
singer.write_message(singer.StateMessage(value=copy.deepcopy(state)))
def sync_streams(snowflake_conn, catalog, state):
for catalog_entry in catalog.streams:
columns = list(catalog_entry.schema.properties.keys())
if not columns:
LOGGER.warning('There are no columns selected for stream %s, skipping it.', catalog_entry.stream)
continue
state = singer.set_currently_syncing(state, catalog_entry.tap_stream_id)
# Emit a state message to indicate that we've started this stream
singer.write_message(singer.StateMessage(value=copy.deepcopy(state)))
md_map = metadata.to_map(catalog_entry.metadata)
replication_method = md_map.get((), {}).get('replication-method')
database_name = common.get_database_name(catalog_entry)
schema_name = common.get_schema_name(catalog_entry)
with metrics.job_timer('sync_table') as timer:
timer.tags['database'] = database_name
timer.tags['table'] = catalog_entry.table
LOGGER.info('Beginning to sync %s.%s.%s', database_name, schema_name, catalog_entry.table)
if replication_method == 'INCREMENTAL':
do_sync_incremental(snowflake_conn, catalog_entry, state, columns)
elif replication_method == 'FULL_TABLE':
do_sync_full_table(snowflake_conn, catalog_entry, state, columns)
else:
raise Exception('Only INCREMENTAL and FULL TABLE replication methods are supported')
state = singer.set_currently_syncing(state, None)
singer.write_message(singer.StateMessage(value=copy.deepcopy(state)))
def do_sync(snowflake_conn, config, catalog, state):
catalog = get_streams(snowflake_conn, catalog, config, state)
sync_streams(snowflake_conn, catalog, state)
def main_impl():
args = utils.parse_args(REQUIRED_CONFIG_KEYS)
snowflake_conn = SnowflakeConnection(args.config)
if args.discover:
do_discover(snowflake_conn, args.config)
elif args.catalog:
state = args.state or {}
do_sync(snowflake_conn, args.config, args.catalog, state)
elif args.properties:
catalog = Catalog.from_dict(args.properties)
state = args.state or {}
do_sync(snowflake_conn, args.config, catalog, state)
else:
LOGGER.info('No properties were selected')
def main():
try:
main_impl()
except Exception as exc:
LOGGER.critical(exc)
raise exc
| 37.817822 | 152 | 0.659807 |
466f9e7a695627b40475eadbff69030fac7a1fa1 | 58 | py | Python | src/agent/docker-rest-agent/gunicorn.conf.py | tianxuanhong/cello | 62aff7bf3038491d5ecacdd9a57946a9ae8a958d | ["Apache-2.0"] | 865 | 2017-01-12T21:51:37.000Z | 2022-03-26T16:39:16.000Z | src/agent/docker-rest-agent/gunicorn.conf.py | tianxuanhong/cello | 62aff7bf3038491d5ecacdd9a57946a9ae8a958d | ["Apache-2.0"] | 226 | 2017-02-06T08:36:24.000Z | 2022-03-30T06:13:46.000Z | src/agent/docker-rest-agent/gunicorn.conf.py | tianxuanhong/cello | 62aff7bf3038491d5ecacdd9a57946a9ae8a958d | ["Apache-2.0"] | 506 | 2017-02-08T06:11:18.000Z | 2022-03-10T04:25:25.000Z |
workers = 1
worker_class = "gevent"
bind = "0.0.0.0:5001"
| 14.5 | 23 | 0.655172 |
d68c8871028ecc7f02a38a9bb075d8dbaeb3a4a1 | 2,380 | py | Python | setup.py | sjin09/alleleCounter | a5edd7bd98675ac4e0d85f7f7d2de1b71470da92 | ["MIT"] | null | null | null | setup.py | sjin09/alleleCounter | a5edd7bd98675ac4e0d85f7f7d2de1b71470da92 | ["MIT"] | 51 | 2021-08-10T04:27:44.000Z | 2022-03-31T04:19:30.000Z | setup.py | sjin09/alleleCounter | a5edd7bd98675ac4e0d85f7f7d2de1b71470da92 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
readme = ''
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.rst')
if os.path.exists(readme_path):
with open(readme_path, 'rb') as stream:
readme = stream.read().decode('utf8')
setup(
long_description=readme,
name='alleleCounter',
version='0.0.1',
description='alleleCounter',
python_requires='==3.*,>=3.6.1',
project_urls={
"documentation": "https://alleleCounter.readthedocs.io",
"homepage": "https://github.com/sjin09/alleleCounter",
"repository": "https://github.com/sjin09/alleleCounter"
},
author='Sangjin Lee',
author_email='sl17@sanger.ac.uk',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
entry_points={
"console_scripts": ["alleleCounter = alleleCounter.__main__:main"]
},
packages=['alleleCounter'],
package_dir={"": "src"},
package_data={"alleleCounter": ["*.typed"]},
install_requires=[
'argparse==1.*,>=1.4.0', 'biopython==1.*,>=1.79.0',
'click==7.*,>=7.0.0', 'natsort==7.*,>=7.1.1', 'pysam==0.*,>=0.16.0'
],
extras_require={
"dev": [
"black==20.*,>=20.8.0.b1", "coverage[toml]==5.*,>=5.3.0",
"darglint==1.*,>=1.5.8", "flake8==3.*,>=3.8.4",
"flake8-bandit==2.*,>=2.1.2", "flake8-bugbear==20.*,>=20.1.4",
"flake8-docstrings==1.*,>=1.5.0",
"flake8-rst-docstrings==0.*,>=0.0.14", "mypy==0.*,>=0.790.0",
"pep8-naming==0.*,>=0.11.1", "pre-commit==2.*,>=2.8.2",
"pre-commit-hooks==3.*,>=3.3.0", "pygments==2.*,>=2.7.2",
"pytest==6.*,>=6.1.2", "reorder-python-imports==2.*,>=2.3.6",
"safety==1.*,>=1.9.0", "sphinx==3.*,>=3.3.1",
"sphinx-autobuild==2020.*,>=2020.9.1", "sphinx-click==2.*,>=2.5.0",
"sphinx-rtd-theme==0.*,>=0.5.0", "typeguard==2.*,>=2.9.1",
"xdoctest[colors]==0.*,>=0.15.0"
]
},
)
| 35 | 79 | 0.548319 |
2d5a819d756ccdd9937e7d5ebc018ab360c496da | 20,036 | py | Python | im2col_funcs.py | Mxbonn/zigzag_fork | 250ee5e22904ba846dfb106983d46b83bd9ee230 | ["BSD-3-Clause"] | 34 | 2020-08-11T14:38:29.000Z | 2022-03-30T10:32:34.000Z | im2col_funcs.py | Mxbonn/zigzag_fork | 250ee5e22904ba846dfb106983d46b83bd9ee230 | ["BSD-3-Clause"] | 9 | 2020-11-16T19:19:48.000Z | 2022-03-31T18:29:24.000Z | im2col_funcs.py | Mxbonn/zigzag_fork | 250ee5e22904ba846dfb106983d46b83bd9ee230 | ["BSD-3-Clause"] | 18 | 2020-08-24T07:26:51.000Z | 2022-01-06T00:59:11.000Z |
from copy import deepcopy
from numpy import prod
from math import ceil
def im2col_layer_transform(layer_info):
im2col_layer_info = {}
for layer_index, layer in layer_info.items():
# TODO support stride under im2col mode
im2col_layer_info[layer_index] = {'B': 1, 'K': 1, 'C': 1, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1,
'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0}
im2col_layer_info[layer_index]['B'] = layer['B'] * layer['OY'] * layer['OX']
im2col_layer_info[layer_index]['K'] = layer['K']
im2col_layer_info[layer_index]['C'] = layer['C'] * layer['FY'] * layer['FX']
return im2col_layer_info
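# Worked example (illustrative layer sizes): a conv layer with B=1, K=64, C=32,
# OY=OX=56 and FY=FX=3 is flattened to an im2col GEMM with
#   B' = 1 * 56 * 56 = 3136, K' = 64, C' = 32 * 3 * 3 = 288,
# while all remaining dimensions collapse to 1 as in the dict above.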
def im2col_mem_access_correction(layer_origin, layer_im2col, mem_total_access, temporal_loop, spatial_loop,
im2col_top_mem_level):
# TODO This is just a temporary solution for a constrained architecture (FB-similar),
# in which only the top memory level for I may not do im2col (thus may need to be corrected) & stride = 1.
C_pre_unrolled = 2
I_mem_level = len(temporal_loop.B['I'])
B_tot = deepcopy(spatial_loop.Bu['I'])
K_tot = deepcopy(spatial_loop.Ku['I'])
C_tot = deepcopy(spatial_loop.Cu['I'])
for level in range(I_mem_level):
B_tot[level + 1] *= temporal_loop.B['I'][level]
K_tot[level + 1] *= temporal_loop.K['I'][level]
C_tot[level + 1] *= temporal_loop.C['I'][level]
B_below = prod(B_tot[0:im2col_top_mem_level + 1]).item()
K_below = prod(K_tot[0:im2col_top_mem_level + 1]).item()
C_below = prod(C_tot[0:im2col_top_mem_level + 1]).item()
B_L, OY_L, OX_L = B_col2im_decouple(B_below, layer_origin)
K_L = K_below
C_below /= C_pre_unrolled
C_L, FY_L, FX_L = C_col2im_decouple(C_below, layer_origin)
C_L *= C_pre_unrolled
B_H = layer_origin.B / B_L
K_H = layer_origin.K / K_L
C_H = layer_origin.C / C_L
OY_H = layer_origin.OY / OY_L
OX_H = layer_origin.OX / OX_L
FY_H = layer_origin.FY / FY_L
FX_H = layer_origin.FX / FX_L
cycle_L = B_L * K_L * C_L * OY_L * OX_L * FY_L * FX_L
I_data_size_L = B_L* C_L * (OY_L + FY_L - 1) * (OX_L + FX_L - 1)
I_data_reuse_L = cycle_L / I_data_size_L
I_data_reuse_tot = layer_origin.total_data_reuse['I']
I_data_reuse_H = I_data_reuse_tot/I_data_reuse_L
a=1
def B_col2im_decouple(B_below, layer_origin):
B = 1
OY = 1
OX = 1
B_below_origin = B_below
if B_below == 1:
return B, OY, OX
else:
if B_below <= layer_origin.OX:
OX = B_below
return B, OY, OX
else:
OX = layer_origin.OX
B_below /= OX
if B_below <= layer_origin.OY:
OY = ceil(B_below)
OX = B_below_origin / OY
return B, OY, OX
else:
OY = layer_origin.OY
B_below /= OY
if B_below <= layer_origin.B:
B = ceil(B_below)
OX = B_below_origin / B / OY
return B, OY, OX
def C_col2im_decouple(C_below, layer_origin):
C = 1
FY = 1
FX = 1
C_below_origin = C_below
if C_below == 1:
return C, FY, FX
else:
if C_below <= layer_origin.FX:
FX = C_below
return C, FY, FX
else:
FX = layer_origin.FX
C_below /= FX
if C_below <= layer_origin.FY:
FY = ceil(C_below)
FX = C_below_origin / FY
return C, FY, FX
else:
FY = layer_origin.FY
C_below /= FY
if C_below <= layer_origin.C:
C = ceil(C_below)
FX = C_below_origin / C / FY
return C, FY, FX
# def su_col2im(mem_scheme, layer_7D_origin, layer_3D_origin, layer_3D_rounded):
# """
# This function updates col2im parameters in mem_scheme, namely,
# col2im_flooring, col2im_fraction_spatial_unrolling, col2im_spatial_unrolling.
# These parameters will later be used to calculate accurate Input access count for
# those Input memory levels above the im2col_top_mem_level (defined in setting file),
# which can get benefit from Input FIFO effect.
# """
#
# ideal_su = mem_scheme.spatial_unrolling
# fraction_su = mem_scheme.fraction_spatial_unrolling
# flooring = mem_scheme.flooring
#
# col2im_ideal_su = {'W': [], 'I': [], 'O': []}
# col2im_fraction_su = {'W': [], 'I': [], 'O': []}
# col2im_flooring = {'W': [], 'I': [], 'O': []}
#
# for ii_su in range(len(ideal_su)):
# for op in ['W', 'I', 'O']:
# for su_per_level in ideal_su[ii_su][op]:
# col2im_ideal_su[op].append([])
# if su_per_level:
# for su_single in su_per_level:
# su_type = su_single[0]
# if su_type == 6:
# col2im_ideal_su[op][-1].append(su_single)
# else:
# su_single_update = su_single_decouple(su_single, layer_7D_origin)
# col2im_ideal_su[op][-1].append(su_single_update)
#
# a = 1
def pw_layer_col2im(spatial_scheme, flooring, temporal_scheme, original_layer):
"""
This function changes a pointwise layer, which has been auto-transformed (im2col), back to its original shape.
It recovers the 3D shape (B, K, C) back to 5D (B, K, C, OY, OX) in spatial_scheme, flooring, temporal_scheme.
"""
OX = {'W': original_layer[3], 'I': original_layer[3], 'O': original_layer[3]}
OY = {'W': original_layer[4], 'I': original_layer[4], 'O': original_layer[4]}
B = {'W': original_layer[7], 'I': original_layer[7], 'O': original_layer[7]}
# su_transfer_count is used to convert flooring, 7 -> 3 or 3,4 or 3,4,7
su_transfer_op = {'W': [], 'I': [], 'O': []}
spatial_scheme_saved = deepcopy(spatial_scheme)
for op in ['W', 'I', 'O']:
for level, su_list in enumerate(spatial_scheme_saved[op]):
su_transfer_op[op].append([])
if su_list:
for idx, su_single in enumerate(su_list):
if su_single[0] == 7:
su_transfer_op[op][-1].append([])
if su_single[1] <= OX[op]:
find_7_item = next((x for x in spatial_scheme[op][level] if x[0] == 7), None)
find_7_idx = spatial_scheme[op][level].index(find_7_item)
OX_position_value = spatial_scheme_saved[op][level][idx][1]
spatial_scheme[op][level].insert(find_7_idx, [3, OX_position_value])
su_transfer_op[op][-1][-1] = [3] # B -> OX
try:
spatial_scheme[op][level].remove((7, su_single[1]))
except:
spatial_scheme[op][level].remove([7, su_single[1]])
OX[op] = round(OX[op] / OX_position_value)
elif OX[op] < su_single[1] < OX[op] * OY[op]:
if OX[op] > 1:
find_7_item = next((x for x in spatial_scheme[op][level] if x[0] == 7), None)
find_7_idx = spatial_scheme[op][level].index(find_7_item)
spatial_scheme[op][level].insert(find_7_idx, [3, OX[op]])
OY_posision_value = round(spatial_scheme_saved[op][level][idx][1] / OX[op])
spatial_scheme[op][level].insert(find_7_idx + 1, [4, OY_posision_value])
su_transfer_op[op][-1][-1] = [3, 4] # B -> OX, OY
try:
spatial_scheme[op][level].remove((7, su_single[1]))
except:
spatial_scheme[op][level].remove([7, su_single[1]])
else:
find_7_item = next((x for x in spatial_scheme[op][level] if x[0] == 7), None)
find_7_idx = spatial_scheme[op][level].index(find_7_item)
OY_posision_value = spatial_scheme_saved[op][level][idx][1]
spatial_scheme[op][level].insert(find_7_idx, [4, OY_posision_value])
su_transfer_op[op][-1][-1] = [4] # B -> OY
try:
spatial_scheme[op][level].remove((7, su_single[1]))
except:
spatial_scheme[op][level].remove([7, su_single[1]])
OX[op] = 1
OY[op] = round(OY[op] / OY_posision_value)
elif su_single[1] == OX[op] * OY[op]:
if OX[op] > 1:
find_7_item = next((x for x in spatial_scheme[op][level] if x[0] == 7), None)
find_7_idx = spatial_scheme[op][level].index(find_7_item)
spatial_scheme[op][level].insert(find_7_idx, [3, OX[op]])
spatial_scheme[op][level].insert(find_7_idx + 1, [4, OY[op]])
su_transfer_op[op][-1][-1] = [3, 4] # B -> OX, OY
try:
spatial_scheme[op][level].remove((7, su_single[1]))
except:
spatial_scheme[op][level].remove([7, su_single[1]])
else:
find_7_item = next((x for x in spatial_scheme[op][level] if x[0] == 7), None)
find_7_idx = spatial_scheme[op][level].index(find_7_item)
spatial_scheme[op][level].insert(find_7_idx, [4, OY[op]])
su_transfer_op[op][-1][-1] = [4] # B -> OY
try:
spatial_scheme[op][level].remove((7, su_single[1]))
except:
spatial_scheme[op][level].remove([7, su_single[1]])
OX[op] = 1
OY[op] = 1
elif su_single[1] > OX[op] * OY[op]:
if OX[op] > 1 and OY[op] > 1:
find_7_item = next((x for x in spatial_scheme[op][level] if x[0] == 7), None)
find_7_idx = spatial_scheme[op][level].index(find_7_item)
spatial_scheme[op][level].insert(find_7_idx, [3, OX[op]])
spatial_scheme[op][level].insert(find_7_idx + 1, [4, OY[op]])
B_posision_value = round(spatial_scheme_saved[op][level][idx][1] / OX[op] / OY[op])
spatial_scheme[op][level].insert(find_7_idx + 2, [7, B_posision_value])
su_transfer_op[op][-1][-1] = [3, 4, 7] # B -> OX, OY, B
try:
spatial_scheme[op][level].remove((7, su_single[1]))
except:
spatial_scheme[op][level].remove([7, su_single[1]])
elif OX[op] == 1 and OY[op] > 1:
find_7_item = next((x for x in spatial_scheme[op][level] if x[0] == 7), None)
find_7_idx = spatial_scheme[op][level].index(find_7_item)
spatial_scheme[op][level].insert(find_7_idx, [4, OY[op]])
B_posision_value = round(spatial_scheme_saved[op][level][idx][1] / OY[op])
spatial_scheme[op][level].insert(find_7_idx + 1, [7, B_posision_value])
su_transfer_op[op][-1][-1] = [4, 7] # B -> OY, B
try:
spatial_scheme[op][level].remove((7, su_single[1]))
except:
spatial_scheme[op][level].remove([7, su_single[1]])
elif OX[op] == 1 and OY[op] == 1:
B_posision_value = spatial_scheme_saved[op][level][idx][1]
su_transfer_op[op][-1][-1] = [7] # B -> B
else:
raise ValueError('ERROR 1 (su)')
OX[op] = 1
OY[op] = 1
B[op] = round(B[op] / B_posision_value)
else:
raise ValueError('ERROR 2 (su)')
if B['W'] != B['I'] != B['O'] or OY['W'] != OY['I'] != OY['O'] or OX['W'] != OX['I'] != OX['O']:
raise ValueError('ERROR 3')
flooring_saved = deepcopy(flooring)
for op in ['W', 'I', 'O']:
for level, floor_list in enumerate(flooring_saved[op]):
i = 0
for XY, floor_XY in enumerate(floor_list):
for floor_single in floor_XY:
if floor_single == 7:
find_7_idx = flooring[op][level][XY].index(7)
for x in reversed(su_transfer_op[op][level][i]):
flooring[op][level][XY].insert(find_7_idx, x)
i += 1
flooring[op][level][XY].remove(7)
temporal_scheme_saved = deepcopy(temporal_scheme)
for op in ['W', 'I', 'O']:
for level, loop_list in enumerate(temporal_scheme_saved[op]):
# su_transfer_op[op].append([])
if loop_list:
for idx, loop_single in enumerate(loop_list):
if loop_single[0] == 7:
# su_transfer_op[op][-1].append([])
if loop_single[1] <= OX[op]:
find_7_item = next((x for x in temporal_scheme[op][level] if x[0] == 7), None)
find_7_idx = temporal_scheme[op][level].index(find_7_item)
OX_position_value = temporal_scheme_saved[op][level][idx][1]
temporal_scheme[op][level].insert(find_7_idx, (3, OX_position_value))
# su_transfer_op[op][-1][-1] = [3] # B -> OX
temporal_scheme[op][level].remove((7, loop_single[1]))
OX[op] = round(OX[op] / OX_position_value)
elif OX[op] < loop_single[1] < OX[op] * OY[op]:
if OX[op] > 1:
find_7_item = next((x for x in temporal_scheme[op][level] if x[0] == 7), None)
find_7_idx = temporal_scheme[op][level].index(find_7_item)
temporal_scheme[op][level].insert(find_7_idx, (3, OX[op]))
OY_posision_value = round(temporal_scheme_saved[op][level][idx][1] / OX[op])
temporal_scheme[op][level].insert(find_7_idx + 1, (4, OY_posision_value))
# su_transfer_op[op][-1][-1] = [3, 4] # B -> OX, OY
temporal_scheme[op][level].remove((7, loop_single[1]))
else:
find_7_item = next((x for x in temporal_scheme[op][level] if x[0] == 7), None)
find_7_idx = temporal_scheme[op][level].index(find_7_item)
OY_posision_value = temporal_scheme_saved[op][level][idx][1]
temporal_scheme[op][level].insert(find_7_idx, (4, OY_posision_value))
# su_transfer_op[op][-1][-1] = [4] # B -> OY
temporal_scheme[op][level].remove((7, loop_single[1]))
OX[op] = 1
OY[op] = round(OY[op] / OY_posision_value)
elif loop_single[1] == OX[op] * OY[op]:
if OX[op] > 1:
find_7_item = next((x for x in temporal_scheme[op][level] if x[0] == 7), None)
find_7_idx = temporal_scheme[op][level].index(find_7_item)
temporal_scheme[op][level].insert(find_7_idx, (3, OX[op]))
temporal_scheme[op][level].insert(find_7_idx + 1, (4, OY[op]))
# su_transfer_op[op][-1][-1] = [3, 4] # B -> OX, OY
temporal_scheme[op][level].remove((7, loop_single[1]))
else:
find_7_item = next((x for x in temporal_scheme[op][level] if x[0] == 7), None)
find_7_idx = temporal_scheme[op][level].index(find_7_item)
temporal_scheme[op][level].insert(find_7_idx, (4, OY[op]))
# su_transfer_op[op][-1][-1] = [4] # B -> OY
temporal_scheme[op][level].remove((7, loop_single[1]))
OX[op] = 1
OY[op] = 1
elif loop_single[1] > OX[op] * OY[op]:
if OX[op] > 1 and OY[op] > 1:
find_7_item = next((x for x in temporal_scheme[op][level] if x[0] == 7), None)
find_7_idx = temporal_scheme[op][level].index(find_7_item)
temporal_scheme[op][level].insert(find_7_idx, (3, OX[op]))
temporal_scheme[op][level].insert(find_7_idx + 1, (4, OY[op]))
B_posision_value = round(temporal_scheme_saved[op][level][idx][1] / OX[op] / OY[op])
temporal_scheme[op][level].insert(find_7_idx + 2, (7, B_posision_value))
# su_transfer_op[op][-1][-1] = [3, 4, 7] # B -> OX, OY, B
temporal_scheme[op][level].remove((7, loop_single[1]))
elif OX[op] == 1 and OY[op] > 1:
find_7_item = next((x for x in temporal_scheme[op][level] if x[0] == 7), None)
find_7_idx = temporal_scheme[op][level].index(find_7_item)
temporal_scheme[op][level].insert(find_7_idx, (4, OY[op]))
B_posision_value = round(temporal_scheme_saved[op][level][idx][1] / OY[op])
temporal_scheme[op][level].insert(find_7_idx + 1, (7, B_posision_value))
# su_transfer_op[op][-1][-1] = [4, 7] # B -> OY, B
temporal_scheme[op][level].remove((7, loop_single[1]))
elif OX[op] == 1 and OY[op] == 1:
B_posision_value = temporal_scheme_saved[op][level][idx][1]
# su_transfer_op[op][-1][-1] = [7] # B -> B
else:
raise ValueError('ERROR 1 (tm)')
OX[op] = 1
OY[op] = 1
B[op] = round(B[op] / B_posision_value)
else:
raise ValueError('ERROR 2 (tm)')
if not (B['W'] == B['I'] == B['O'] == 1 and OY['W'] == OY['I'] == OY['O'] == 1 and OX['W'] == OX['I'] == OX['O'] == 1):
raise ValueError('ERROR 4')
return spatial_scheme, flooring, temporal_scheme
| 53.715818 | 123 | 0.468706 |
26155edda96c4d8dd3d9585f39f9237e1b458c1d | 1,915 | py | Python | mlprodict/npy/onnx_version.py | sdpython/mlprodic | 9367dacc91d35ec670c8a8a76708300a75bbc993 | ["MIT"] | 32 | 2018-03-04T23:33:30.000Z | 2022-03-10T19:15:06.000Z | mlprodict/npy/onnx_version.py | sdpython/mlprodic | 9367dacc91d35ec670c8a8a76708300a75bbc993 | ["MIT"] | 184 | 2017-11-30T14:10:35.000Z | 2022-02-21T08:29:31.000Z | mlprodict/npy/onnx_version.py | sdpython/mlprodic | 9367dacc91d35ec670c8a8a76708300a75bbc993 | ["MIT"] | 9 | 2019-07-24T13:18:00.000Z | 2022-03-07T04:08:07.000Z |
"""
@file
@brief Identifies a version of a function.
.. versionadded:: 0.6
"""
from collections import namedtuple
class FctVersion(namedtuple("_version_", ['args', 'kwargs'])):
"""
Identifies a version of a function based on its
arguments and its parameters.
"""
__slots__ = ()
def _check_(self):
if self.args is not None and not isinstance(self.args, tuple):
raise TypeError("args must be None or a tuple.")
if self.kwargs is not None and not isinstance(self.kwargs, tuple):
raise TypeError("kwargs must None or be a tuple.")
def __repr__(self):
"usual"
def cl(s):
return str(s).replace("<class '", "").replace("'>", "")
if self.args is None:
sa = "None"
else:
sa = ",".join(map(cl, self.args))
sa = ("(%s)" % sa) if len(self.args) > 1 else ("(%s,)" % sa)
return "%s(%s, %s)" % (
self.__class__.__name__, sa, self.kwargs)
def __len__(self):
"Returns the sum of lengths."
return ((0 if self.args is None else len(self.args)) +
(0 if self.kwargs is None else len(self.kwargs)))
def as_tuple(self):
"Returns a single tuple for the version."
return ((tuple() if self.args is None else self.args) +
(tuple() if self.kwargs is None else self.kwargs))
def as_tuple_with_sep(self, sep):
"Returns a single tuple for the version."
return ((tuple() if self.args is None else self.args) +
(sep, ) +
(tuple() if self.kwargs is None else self.kwargs))
def as_string(self):
"Returns a single stirng identifier."
val = "_".join(map(str, self.as_tuple_with_sep("_")))
val = val.replace("<class 'numpy.", "").replace(
'.', "_").replace("'>", "").replace(" ", "")
return val.lower()
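# Illustrative usage (assuming numpy is imported):
#   >>> FctVersion((numpy.float32,), ('axis',)).as_string()
#   would yield roughly 'float32___axis'
#   >>> len(FctVersion((numpy.float32,), None))
#   1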
| 33.017241 | 74 | 0.557702 |
7118ff77f124a682db3c8f468cf3c343ed70d8ce | 10,058 | py | Python | dragoman/client.py | sumitkhamar22/dragoman | d03e896b71a2f3a8ced56afdba875d76a3fd321e | ["MIT"] | null | null | null | dragoman/client.py | sumitkhamar22/dragoman | d03e896b71a2f3a8ced56afdba875d76a3fd321e | ["MIT"] | null | null | null | dragoman/client.py | sumitkhamar22/dragoman | d03e896b71a2f3a8ced56afdba875d76a3fd321e | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
A Translation module.
You can translate text using this module.
"""
import random
import typing
import httpcore
import httpx
from httpx import Timeout
from dragoman import urls, utils
from dragoman.gtoken import TokenAcquirer
from dragoman.constants import (
DEFAULT_USER_AGENT, LANGCODES, LANGUAGES, SPECIAL_CASES,
DEFAULT_RAISE_EXCEPTION, DUMMY_DATA
)
from dragoman.models import Translated, Detected
EXCLUDES = ('en', 'ca', 'fr')
class Translator:
"""Google Translate ajax API implementation class
You have to create an instance of Translator to use this API
:param service_urls: google translate url list. URLs will be used randomly.
For example ``['translate.google.com', 'translate.google.co.kr']``
:type service_urls: a sequence of strings
:param user_agent: the User-Agent header to send when making requests.
:type user_agent: :class:`str`
:param proxies: proxies configuration.
Dictionary mapping protocol or protocol and host to the URL of the proxy
For example ``{'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}``
:type proxies: dictionary
:param timeout: Definition of timeout for httpx library.
Will be used for every request.
:type timeout: number or a pair of numbers
:param raise_exception: if `True`, raise an exception if something goes wrong
:type raise_exception: boolean
"""
def __init__(self, service_urls=None, user_agent=DEFAULT_USER_AGENT,
raise_exception=DEFAULT_RAISE_EXCEPTION,
proxies: typing.Dict[str, httpcore.SyncHTTPTransport] = None, timeout: Timeout = None):
self.client = httpx.Client()
if proxies is not None: # pragma: nocover
self.client.proxies = proxies
self.client.headers.update({
'User-Agent': user_agent,
})
if timeout is not None:
self.client.timeout = timeout
self.service_urls = service_urls or ['translate.google.com']
self.token_acquirer = TokenAcquirer(client=self.client, host=self.service_urls[0])
self.raise_exception = raise_exception
def _pick_service_url(self):
if len(self.service_urls) == 1:
return self.service_urls[0]
return random.choice(self.service_urls)
def _translate(self, text, dest, src, override):
token = self.token_acquirer.do(text)
params = utils.build_params(query=text, src=src, dest=dest,
token=token, override=override)
url = urls.TRANSLATE.format(host=self._pick_service_url())
r = self.client.get(url, params=params)
if r.status_code == 200:
data = utils.format_json(r.text)
return data
else:
if self.raise_exception:
raise Exception('Unexpected status code "{}" from {}'.format(r.status_code, self.service_urls))
DUMMY_DATA[0][0][0] = text
return DUMMY_DATA
def _parse_extra_data(self, data):
response_parts_name_mapping = {
0: 'translation',
1: 'all-translations',
2: 'original-language',
5: 'possible-translations',
6: 'confidence',
7: 'possible-mistakes',
8: 'language',
11: 'synonyms',
12: 'definitions',
13: 'examples',
14: 'see-also',
}
extra = {}
for index, category in response_parts_name_mapping.items():
extra[category] = data[index] if (index < len(data) and data[index]) else None
return extra
def translate(self, text, dest='en', src='auto', **kwargs):
"""Translate text from source language to destination language
:param text: The source text(s) to be translated. Batch translation is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:param dest: The language to translate the source text into.
The value should be one of the language codes listed in :const:`googletrans.LANGUAGES`
or one of the language names listed in :const:`googletrans.LANGCODES`.
:param dest: :class:`str`; :class:`unicode`
:param src: The language of the source text.
The value should be one of the language codes listed in :const:`googletrans.LANGUAGES`
or one of the language names listed in :const:`googletrans.LANGCODES`.
If a language is not specified,
the system will attempt to identify the source language automatically.
:param src: :class:`str`; :class:`unicode`
:rtype: Translated
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.translate('안녕하세요.')
<Translated src=ko dest=en text=Good evening. pronunciation=Good evening.>
>>> translator.translate('안녕하세요.', dest='ja')
<Translated src=ko dest=ja text=こんにちは。 pronunciation=Kon'nichiwa.>
>>> translator.translate('veritas lux mea', src='la')
<Translated src=la dest=en text=The truth is my light pronunciation=The truth is my light>
Advanced usage:
>>> translations = translator.translate(['The quick brown fox', 'jumps over', 'the lazy dog'], dest='ko')
>>> for translation in translations:
... print(translation.origin, ' -> ', translation.text)
The quick brown fox -> 빠른 갈색 여우
jumps over -> 이상 점프
the lazy dog -> 게으른 개
"""
dest = dest.lower().split('_', 1)[0]
src = src.lower().split('_', 1)[0]
if src != 'auto' and src not in LANGUAGES:
if src in SPECIAL_CASES:
src = SPECIAL_CASES[src]
elif src in LANGCODES:
src = LANGCODES[src]
else:
raise ValueError('invalid source language')
if dest not in LANGUAGES:
if dest in SPECIAL_CASES:
dest = SPECIAL_CASES[dest]
elif dest in LANGCODES:
dest = LANGCODES[dest]
else:
raise ValueError('invalid destination language')
if isinstance(text, list):
result = []
for item in text:
translated = self.translate(item, dest=dest, src=src, **kwargs)
result.append(translated)
return result
origin = text
data = self._translate(text, dest, src, kwargs)
# this code will be updated when the format is changed.
translated = ''.join([d[0] if d[0] else '' for d in data[0]])
extra_data = self._parse_extra_data(data)
# actual source language that will be recognized by Google Translator when the
# src passed is equal to auto.
try:
src = data[2]
except Exception: # pragma: nocover
pass
pron = origin
try:
pron = data[0][1][-2]
except Exception: # pragma: nocover
pass
if pron is None:
try:
pron = data[0][1][2]
            except Exception:  # pragma: nocover
pass
if dest in EXCLUDES and pron == origin:
pron = translated
# put final values into a new Translated object
result = Translated(src=src, dest=dest, origin=origin,
text=translated, pronunciation=pron, extra_data=extra_data)
return result
def detect(self, text, **kwargs):
"""Detect language of the input text
:param text: The source text(s) whose language you want to identify.
Batch detection is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:rtype: Detected
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
<Detected lang=ko confidence=0.27041003>
>>> translator.detect('この文章は日本語で書かれました。')
<Detected lang=ja confidence=0.64889508>
>>> translator.detect('This sentence is written in English.')
<Detected lang=en confidence=0.22348526>
>>> translator.detect('Tiu frazo estas skribita en Esperanto.')
<Detected lang=eo confidence=0.10538048>
Advanced usage:
>>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
>>> for lang in langs:
... print(lang.lang, lang.confidence)
ko 1
ja 0.92929292
en 0.96954316
fr 0.043500196
"""
if isinstance(text, list):
result = []
for item in text:
lang = self.detect(item)
result.append(lang)
return result
data = self._translate(text, 'en', 'auto', kwargs)
# actual source language that will be recognized by Google Translator when the
# src passed is equal to auto.
src = ''
confidence = 0.0
try:
src = ''.join(data[8][0])
confidence = data[8][-2][0]
except Exception: # pragma: nocover
pass
result = Detected(lang=src, confidence=confidence)
return result
| 37.390335 | 117 | 0.586399 |
9dbe585053a44bcf69990c1c0b9f03fd6e8c5438 | 196 | py | Python | Episode #16 - Multiple Inheritance/Main.py | Giyar33/OOP | 3845b11124dbfb43b249c40c045cf784c6881497 | [
"MIT"
] | 45 | 2018-07-25T00:43:46.000Z | 2022-02-18T16:57:19.000Z | Episode #16 - Multiple Inheritance/Main.py | Giyar33/OOP | 3845b11124dbfb43b249c40c045cf784c6881497 | [
"MIT"
] | null | null | null | Episode #16 - Multiple Inheritance/Main.py | Giyar33/OOP | 3845b11124dbfb43b249c40c045cf784c6881497 | [
"MIT"
] | 60 | 2018-09-28T03:42:06.000Z | 2022-03-28T15:03:45.000Z | class A:
def method_A(self):
print("ini adalah method A")
class B:
def method_B(self):
print("ini adalah method B")
class C(A,B):
pass
objek = C()
objek.method_A()
objek.method_B()
| 9.8 | 30 | 0.653061 |
f9fa24f0caf22974cfc3a9ac941f4f1f36a1c1a5 | 747 | py | Python | examples/example_callproc.py | adadaptedinc/aiomysql | f2927fd7b1f767d1e2562532682cb810a0a64de5 | [
"MIT"
] | 1,535 | 2015-02-01T23:38:42.000Z | 2022-03-31T11:53:37.000Z | examples/example_callproc.py | adadaptedinc/aiomysql | f2927fd7b1f767d1e2562532682cb810a0a64de5 | [
"MIT"
] | 668 | 2015-02-02T22:04:10.000Z | 2022-03-31T01:44:46.000Z | examples/example_callproc.py | adadaptedinc/aiomysql | f2927fd7b1f767d1e2562532682cb810a0a64de5 | [
"MIT"
] | 299 | 2015-02-17T22:26:44.000Z | 2022-03-17T12:25:55.000Z | import asyncio
import aiomysql
async def test_example(loop):
conn = await aiomysql.connect(host='127.0.0.1', port=3306,
user='root', password='',
db='test_pymysql', loop=loop)
async with conn.cursor() as cur:
await cur.execute('DROP PROCEDURE IF EXISTS myinc;')
await cur.execute("""CREATE PROCEDURE myinc(p1 INT)
BEGIN
SELECT p1 + 1;
END""")
await cur.callproc('myinc', [1])
(ret, ) = await cur.fetchone()
assert 2, ret
print(ret)
conn.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(test_example(loop))
| 27.666667 | 63 | 0.514056 |
a569dd73bf4c737b5da9b60bab3083b5192099d3 | 5,151 | py | Python | weakest_link/game.py | jmattfong/weakest-link | c4dba2b51a7271b83d3cc14b1329836805019671 | [
"Apache-2.0"
] | null | null | null | weakest_link/game.py | jmattfong/weakest-link | c4dba2b51a7271b83d3cc14b1329836805019671 | [
"Apache-2.0"
] | null | null | null | weakest_link/game.py | jmattfong/weakest-link | c4dba2b51a7271b83d3cc14b1329836805019671 | [
"Apache-2.0"
] | null | null | null | from weakest_link.util import wait_for_choice, green, red, dollars, get_random_mean_word, starts_with_vowel, format_time
class WeakestLinkGame :
def __init__(self, players, rounds, final_round) :
self.players = players
self.rounds = rounds
self.final_round = final_round
self.total_bank = 0
self.maximum_bank = 0
self.current_round = 0
# For the API
def get_current_round(self) :
return self.rounds[self.current_round] if self.current_round < len(self.rounds) else self.final_round
def get_current_round_name(self) :
return self.get_current_round().get_name()
def get_players(self) :
return self.players
def get_current_bank(self, color=True) :
if self.current_round >= len(self.rounds) :
return 0
return dollars(self.get_current_round().round_bank, color=color)
def get_total_bank(self, color=True) :
return dollars(self.total_bank, color=False)
def get_bank_links(self) :
if self.current_round >= len(self.rounds) :
return []
return [dollars(link, color=False) for link in self.get_current_round().bank_links]
def get_current_link(self) :
if self.current_round >= len(self.rounds) :
return 0
return self.get_current_round().current_link
def get_current_player_num(self) :
if self.current_round >= len(self.rounds) :
return 0
return self.get_current_round().get_current_player_num()
def get_time_remaining(self) :
if self.current_round >= len(self.rounds) :
return 0
time = self.get_current_round().seconds_remaining
time = time if time > 0 else 0
return format_time(time)
# For the CLI
def run(self) :
first_player = self.players[0]
for i in range(len(self.rounds)) :
self.current_round = i
if len(self.players) == 2 :
print("Not running all rounds since we don't have enough players")
print()
break
if i != 0 :
print('As the strongest link last round,', green(first_player), 'will go first')
print()
round = self.rounds[i]
self.try_to_start_round(i+1, round, first_player)
first_player = self.handle_finished_round_results(round)
if self.current_round < 2 :
print('Not voting off weakest link since we are on round', self.current_round+1)
weakest_link = None
elif self.current_round == 2 :
print(red('Time to vote off multiple players!'))
weakest_link = self.vote_for_weakest_link()
weakest_link = self.vote_for_weakest_link()
weakest_link = self.vote_for_weakest_link()
else :
weakest_link = self.vote_for_weakest_link()
if first_player == weakest_link :
first_player = round.get_strongest_link(first_player)
self.current_round = len(self.rounds)
while len(self.players) > 2 :
weakest_link = self.vote_for_weakest_link()
if first_player == weakest_link :
first_player = round.get_strongest_link(first_player)
first_player = wait_for_choice('As the strongest link last round, ' + green(first_player) + ' chooses who will go first in the ' +\
red('final round') + '. Choices: ' + ", ".join(self.players) + ' > ', self.players)
self.try_to_start_round('Final', self.final_round, first_player)
print(green(str(self.final_round.winner) + ' is the winner! They win ' + dollars(self.total_bank)))
print()
print("Game over, goodnight!")
# Helpers
def try_to_start_round(self, round_num, round, first_player) :
wait_for_choice("Enter 'S' to start round " + str(round_num) + " > ", 'S')
print('Starting round', round_num)
print()
round.start_round(self.players, first_player)
print('Finished round', round_num)
print()
def handle_finished_round_results(self, round) :
# TODO determine next first player and total bank
self.total_bank += round.round_bank
self.maximum_bank += round.bank_links[-1]
strongest_link = round.get_strongest_link()
print('That round the team banked', dollars(round.round_bank))
adjective = get_random_mean_word()
print('Out of a possible', dollars(self.maximum_bank), "the team banked", 'an' if starts_with_vowel(adjective) else 'a', adjective, dollars(self.total_bank))
print('Statistically, the', green('strongest link'), 'was', green(strongest_link))
print('Statistically, the', red('weakest link'), 'was', red(round.get_weakest_link()))
print()
return strongest_link
def vote_for_weakest_link(self) :
weakest_link = wait_for_choice("Who is the weakest link? Choices: " + ', '.join(self.players) + " > ", self.players)
self.players.remove(weakest_link)
return weakest_link
| 40.559055 | 165 | 0.628422 |
eaf3731855b74e20b8dcfe4969f2876ca251cfc8 | 4,281 | py | Python | wagtail/wagtailtenant/apps.py | caputomarcos/wagtail | a75b47ccb6f5a2a4023aa7400792fc1278dfa107 | [
"BSD-3-Clause"
] | 1 | 2019-11-06T10:51:42.000Z | 2019-11-06T10:51:42.000Z | wagtail/wagtailtenant/apps.py | caputomarcos/wagtail | a75b47ccb6f5a2a4023aa7400792fc1278dfa107 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailtenant/apps.py | caputomarcos/wagtail | a75b47ccb6f5a2a4023aa7400792fc1278dfa107 | [
"BSD-3-Clause"
] | 2 | 2017-08-08T01:39:02.000Z | 2018-05-06T06:16:10.000Z | from django.apps import AppConfig, apps
from django.conf import settings
from django.core.checks import Critical, Error, Warning, register
from django.core.files.storage import default_storage
from wagtail.wagtailtenant.storage import TenantStorageMixin
from wagtail.wagtailtenant.utils import get_public_schema_name, get_tenant_model
class WagtailTenantConfig(AppConfig):
name = 'wagtail.wagtailtenant'
label = 'wagtailtenant'
verbose_name = "Wagtail tenant"
@register('config')
def best_practice(app_configs, **kwargs):
"""
    Test for configuration recommendations. These are best practices; they
    avoid hard-to-find bugs and unexpected behaviour.
"""
if app_configs is None:
app_configs = apps.get_app_configs()
# Take the app_configs and turn them into *old style* application names.
# This is what we expect in the SHARED_APPS and TENANT_APPS settings.
INSTALLED_APPS = [
config.name
for config in app_configs
]
if not hasattr(settings, 'TENANT_APPS'):
return [Critical('TENANT_APPS setting not set')]
if not hasattr(settings, 'TENANT_MODEL'):
return [Critical('TENANT_MODEL setting not set')]
if not hasattr(settings, 'SHARED_APPS'):
return [Critical('SHARED_APPS setting not set')]
if 'wagtail.wagtailtenant.routers.TenantSyncRouter' not in settings.DATABASE_ROUTERS:
return [
Critical("DATABASE_ROUTERS setting must contain "
"'wagtail.wagtailtenant.routers.TenantSyncRouter'.")
]
errors = []
django_index = next(i for i, s in enumerate(INSTALLED_APPS) if s.startswith('django.'))
if INSTALLED_APPS.index('wagtail.wagtailtenant') > django_index:
errors.append(
Warning("You should put 'wagtail.wagtailtenant' before any django "
"core applications in INSTALLED_APPS.",
obj="django.conf.settings",
hint="This is necessary to overwrite built-in django "
"management commands with their schema-aware "
"implementations.",
id="wagtailtenant.W001"))
if not settings.TENANT_APPS:
errors.append(
Error("TENANT_APPS is empty.",
hint="Maybe you don't need this app?",
id="wagtailtenant.E001"))
if hasattr(settings, 'PG_EXTRA_SEARCH_PATHS'):
if get_public_schema_name() in settings.PG_EXTRA_SEARCH_PATHS:
errors.append(Critical(
"%s can not be included on PG_EXTRA_SEARCH_PATHS."
% get_public_schema_name()))
# make sure no tenant schema is in settings.PG_EXTRA_SEARCH_PATHS
invalid_schemas = set(settings.PG_EXTRA_SEARCH_PATHS).intersection(
get_tenant_model().objects.all().values_list('schema_name', flat=True))
if invalid_schemas:
errors.append(Critical(
"Do not include tenant schemas (%s) on PG_EXTRA_SEARCH_PATHS."
% ", ".join(sorted(invalid_schemas))))
if not settings.SHARED_APPS:
errors.append(
Warning("SHARED_APPS is empty.",
id="wagtailtenant.W002"))
if not set(settings.TENANT_APPS).issubset(INSTALLED_APPS):
delta = set(settings.TENANT_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have TENANT_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.TENANT_APPS if a in delta],
id="wagtailtenant.E002"))
if not set(settings.SHARED_APPS).issubset(INSTALLED_APPS):
delta = set(settings.SHARED_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have SHARED_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.SHARED_APPS if a in delta],
id="wagtailtenant.E003"))
if not isinstance(default_storage, TenantStorageMixin):
errors.append(Warning(
"Your default storage engine is not tenant aware.",
hint="Set settings.DEFAULT_FILE_STORAGE to "
"'wagtail.wagtailtenant.storage.TenantFileSystemStorage'",
id="wagtailtenant.W003"
))
return errors
| 40.386792 | 91 | 0.650549 |
2c504756fcafb211e322a233d188d322542d6cd7 | 284 | py | Python | resource/NeverLAN-CTF_2019/Binary4/ex.py | JisoonPark/writeup | f0a7457e9693271e0ca69f123e4bd417b3043261 | [
"MIT"
] | 1 | 2019-05-14T02:18:11.000Z | 2019-05-14T02:18:11.000Z | resource/NeverLAN-CTF_2019/Binary4/ex.py | JisoonPark/writeup | f0a7457e9693271e0ca69f123e4bd417b3043261 | [
"MIT"
] | null | null | null | resource/NeverLAN-CTF_2019/Binary4/ex.py | JisoonPark/writeup | f0a7457e9693271e0ca69f123e4bd417b3043261 | [
"MIT"
] | 1 | 2021-09-24T13:25:35.000Z | 2021-09-24T13:25:35.000Z | #:100F4000982F9F7D9554933028F40C5F1F4FFFE33B
data = open("embedded_db.hex", "r").read().split('\r\n')[:-2]
totLen = 0
f = open("bin", "w")
for l in data:
assert(l[0] == ":")
dataLen = int(l[1: 3], 16)
totLen += dataLen
f.write(l[9: 9 + dataLen * 2].decode("hex"))
print totLen
| 20.285714 | 61 | 0.619718 |
67155c7485c1bd874750212c654c80ec028250a8 | 583 | py | Python | thirdparty/netmiko/paloauto/gspackage/paloauto.py | gwaysoft/python | a74a0b553dfca9606083a41ab6d03801e67d2467 | [
"Apache-2.0"
] | null | null | null | thirdparty/netmiko/paloauto/gspackage/paloauto.py | gwaysoft/python | a74a0b553dfca9606083a41ab6d03801e67d2467 | [
"Apache-2.0"
] | null | null | null | thirdparty/netmiko/paloauto/gspackage/paloauto.py | gwaysoft/python | a74a0b553dfca9606083a41ab6d03801e67d2467 | [
"Apache-2.0"
] | null | null | null | from netmiko.paloalto import PaloAltoPanosSSH
connectConfig = {
'device_type': 'paloalto_panos',
'host': '172.16.8.81',
'username': 'python-api',
'password': 'python'
}
def executeCommand(command):
net_connect = PaloAltoPanosSSH(**connectConfig)
return net_connect.send_command(command)
def executeConfigCommand(config_commands):
print(config_commands)
if len(config_commands) == 0:
return
config_commands.append("commit")
net_connect = PaloAltoPanosSSH(**connectConfig)
return net_connect.send_config_set(config_commands)
| 22.423077 | 55 | 0.722127 |
aa3a91f508714fa8f36ed3eac3f57ba6fb6da77d | 62 | py | Python | spotcli/utils/__init__.py | SupersonicAds/spot-cli | d3b53307e1389614320c9938d7e0dbb2222b203a | [
"MIT"
] | 2 | 2021-07-05T12:54:07.000Z | 2022-03-11T10:19:50.000Z | spotcli/utils/__init__.py | SupersonicAds/spot-cli | d3b53307e1389614320c9938d7e0dbb2222b203a | [
"MIT"
] | 14 | 2020-11-04T20:52:45.000Z | 2021-01-27T08:45:07.000Z | spotcli/utils/__init__.py | SupersonicAds/spot-cli | d3b53307e1389614320c9938d7e0dbb2222b203a | [
"MIT"
] | 1 | 2020-12-03T12:14:16.000Z | 2020-12-03T12:14:16.000Z | from spotcli.utils.filter import filter
__all__ = ["filter"]
| 15.5 | 39 | 0.758065 |
527ce5831f8dab5b6c19d205a2eb6284945c0ee7 | 9,449 | py | Python | src/RMLtoShacl.py | RMLio/RML2SHACL | 72f8ae0cfd91eb4a3ddd02b9e5a3fad1e174460a | [
"MIT"
] | 2 | 2021-12-07T03:31:21.000Z | 2021-12-21T03:31:13.000Z | src/RMLtoShacl.py | RMLio/RML2SHACL | 72f8ae0cfd91eb4a3ddd02b9e5a3fad1e174460a | [
"MIT"
] | null | null | null | src/RMLtoShacl.py | RMLio/RML2SHACL | 72f8ae0cfd91eb4a3ddd02b9e5a3fad1e174460a | [
"MIT"
] | 1 | 2021-12-07T03:31:25.000Z | 2021-12-07T03:31:25.000Z | import argparse
import csv
import logging
import os
from pathlib import Path
import string
import time
import timeit
from typing import Any, List
import rdflib
from rdflib import RDF
from requests.exceptions import HTTPError
from .RML import *
from .SHACL import *
class RMLtoSHACL:
def __init__(self):
self.RML = RML()
self.shaclNS = rdflib.Namespace('http://www.w3.org/ns/shacl#')
self.rdfSyntax = rdflib.Namespace(
'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
self.SHACL = SHACL()
def helpAddTriples(self, shacl_graph: Graph, sub: Identifier,
pred: Identifier, obj_arr: Optional[List[Identifier]]) -> None:
"""
        This method takes an array of object terms (obj_arr) associated with
        the given predicate (pred) and adds them to the
        subject node (sub) as triples.
"""
if obj_arr is None:
return
for el in obj_arr:
shacl_graph.add(
(sub, pred, el))
def transformIRI(self, node: Identifier, shacl_graph: Graph) -> None:
shacl_graph.add((node, self.shaclNS.nodeKind, self.shaclNS.IRI))
def transformBlankNode(self, node: Identifier, shacl_graph: Graph) -> None:
shacl_graph.add((node, self.shaclNS.nodeKind, self.shaclNS.BlankNode))
def transformList(self, node: Identifier, arr: List[Any], shacl_graph: Graph) -> None:
"""
        Transform the given array of objects into an RDF-compliant list.
        The transformation is done in the manner of a functional (linked) list.
"""
current_node = node
next_node = rdflib.BNode()
size = len(arr)
for i, obj in enumerate(arr):
shacl_graph.add(
(current_node, self.rdfSyntax.first, rdflib.Literal(obj)))
if i != size - 1:
shacl_graph.add(
(current_node, self.rdfSyntax.rest, next_node))
else:
shacl_graph.add(
(current_node, self.rdfSyntax.rest, self.rdfSyntax.nil))
current_node = next_node
next_node = rdflib.BNode()
def transformLiteral(self, node: Identifier, termMap: TermMap, shacl_graph: Graph) -> None:
shacl_graph.add((node, self.shaclNS.nodeKind, self.shaclNS.Literal))
# Transform rr:language
# it can be a list of languages
language_iri = self.RML.LANGUAGE
if language_iri in termMap.po_dict:
languages_arr = termMap.po_dict[language_iri]
for language in languages_arr:
languageBlank = rdflib.BNode()
shacl_graph.add(
(node, self.shaclNS.languageIn, languageBlank))
self.transformList(languageBlank, language.split('-'), shacl_graph)
# Transform rr:datatype
datatype_iri = self.RML.DATATYPE
if datatype_iri in termMap.po_dict:
self.helpAddTriples(shacl_graph, node,
self.shaclNS.datatype, termMap.po_dict[datatype_iri])
def serializeTemplate(self, templateString: Identifier) -> Identifier:
        # We want to replace each {word} placeholder with a regex wildcard:
        # '.' matches any single character and '*' means zero or more repetitions.
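        # For example, "http://example.com/{id}/{name}" becomes "http://example.com/.*/.*".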
parts = templateString.split('{')
parts2 = []
for part in parts:
if '}' in part:
parts2 = parts2 + part.split('}')
else:
parts2 = parts2 + [part]
string = ''
tel = 1
for part in parts2:
if tel % 2 != 0:
string = string + part
else:
string = string + '.*'
# wildcard = '.' + '*'
tel += 1
resultaat = rdflib.Literal(string)
return resultaat
def createNodeShape(self, triples_map: TriplesMap, shacl_graph: Graph) -> Identifier:
# start of SHACL shape
subjectShape = rdflib.URIRef(triples_map.iri + "/shape")
shacl_graph.add((subjectShape, rdflib.RDF.type, self.shaclNS.NodeShape))
self.transformSubjectMap(subjectShape, triples_map.sm, shacl_graph)
return subjectShape
def transformSubjectMap(self, node: Identifier, subjectmap: SubjectMap, shacl_graph: Graph) -> None:
"""
Transform the given SubjectMap into the corresponding SHACL shapes and
store them in the self.SHACL's rdflib graph.
"""
po_dict = subjectmap.po_dict
# Start of class and targetNode shacl mapping
self.helpAddTriples(shacl_graph, node,
self.shaclNS.targetNode,
po_dict.get(self.RML.CONSTANT, []))
self.helpAddTriples(shacl_graph, node,
self.shaclNS.targetClass,
po_dict.get(self.RML.CLASS, []))
self.helpAddTriples(shacl_graph, node,
self.shaclNS["class"],
po_dict.get(self.RML.CLASS, []))
# End of class and targetNode shacl mapping
# Shacl shl:pattern parsing
template_strings = [self.serializeTemplate(x)
for x in po_dict.get(self.RML.TEMPLATE, [])]
self.helpAddTriples(shacl_graph, node,
self.shaclNS.pattern, template_strings)
# Uri or Literal parsing
self.transformIRIorLiteralorBlankNode(po_dict, node, subjectmap, shacl_graph)
def transformIRIorLiteralorBlankNode(self, po_dict: Dict[URIRef, List[Any]],
node: Identifier, termMap: TermMap,
shacl_graph: Graph) -> None:
# Uri or Literal parsing
type_arr = po_dict.get(self.RML.TERMTYPE)
if type_arr:
term_type = type_arr[0]
if term_type == self.RML.r2rmlNS.Literal:
self.transformLiteral(node, termMap, shacl_graph)
elif term_type == self.RML.r2rmlNS.IRI:
self.transformIRI(node, shacl_graph)
elif term_type == self.RML.r2rmlNS.BlankNode:
self.transformBlankNode(node, shacl_graph)
else:
print(f"WARNING: {term_type} is not a valid term type for {self}, defaulting to IRI")
self.transformIRI(node, shacl_graph)
# default behaviour if no termType is defined
elif po_dict.get(self.RML.REFERENCE):
self.transformLiteral(node, termMap, shacl_graph)
else:
self.transformIRI(node, shacl_graph)
def transformPOM(self, node: Identifier, pom: PredicateObjectMap, shacl_graph: Graph) -> None:
pm = pom.PM
om = pom.OM
# Find the subject's class in
# Check if it defines the class of the subject node (node) and
# return immediately since the pom is parsed
pred_constant_objs = pm.po_dict.get(self.RML.CONSTANT)
if pred_constant_objs and pred_constant_objs[0] == rdflib.RDF.type:
om_constant_objs = om.po_dict.get(self.RML.CONSTANT)
self.helpAddTriples(shacl_graph, node,
self.shaclNS.targetClass, om_constant_objs)
return
# Fill in the sh:property node of the given subject (@param node)
sh_property = rdflib.BNode()
shacl_graph.add(
(node, self.shaclNS.property, sh_property))
self.transformIRIorLiteralorBlankNode(om.po_dict, sh_property, om, shacl_graph)
ptm = om.po_dict.get(self.RML.r2rmlNS.parentTriplesMap)
if ptm:
ptm = ptm[0] + "/shape"
shacl_graph.add(
(sh_property, self.shaclNS.node, ptm))
self.helpAddTriples(shacl_graph, sh_property,
self.shaclNS.path, pm.po_dict.get(self.RML.CONSTANT))
def writeShapeToFile(self, file_name, shape_dir="shapes/"):
for prefix, ns in self.RML.graph.namespaces():
self.SHACL.graph.bind(prefix, ns)
# @base is used for <> in the RML ttl graph
self.SHACL.graph.bind('sh', 'http://www.w3.org/ns/shacl#', False)
self.SHACL.graph.bind(
'rdfs', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
parent_folder = os.path.dirname(file_name)
Path(f"%s%s" % (shape_dir, parent_folder)).mkdir(
parents=True, exist_ok=True)
filenNameShape = "%s%s" % (shape_dir, file_name)
self.SHACL.graph.serialize(destination=filenNameShape, format='turtle')
return filenNameShape
def evaluate_file(self, rml_mapping_file):
self.RML.parseFile(rml_mapping_file)
for _, triples_map in self.RML.tm_model_dict.items():
subject_shape_node = self.createNodeShape(triples_map, self.SHACL.graph)
for pom in triples_map.poms:
self.transformPOM(subject_shape_node, pom, self.SHACL.graph)
outputfileName = f"{rml_mapping_file}-output-shape.ttl"
self.writeShapeToFile(outputfileName)
validation_shape_graph = rdflib.Graph()
validation_shape_graph.parse("shacl-shacl.ttl", format="turtle")
self.SHACL.Validation(validation_shape_graph, self.SHACL.graph)
logging.debug("*" * 100)
logging.debug("RESULTS")
logging.debug("=" * 100)
logging.debug(self.SHACL.results_text)
return None
| 37.645418 | 104 | 0.601333 |
433235306f53ad7f89b5769f65c969cf3858fd2a | 1,482 | py | Python | libraries/botbuilder-testing/setup.py | suhyangbae1234/botbuilder-python | 01fcbe9e4d60bfe1bd314e4481834e453284ca41 | [
"MIT"
] | 1 | 2020-02-19T15:50:10.000Z | 2020-02-19T15:50:10.000Z | libraries/botbuilder-testing/setup.py | Fortune-Adekogbe/botbuilder-python | 4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4 | [
"MIT"
] | null | null | null | libraries/botbuilder-testing/setup.py | Fortune-Adekogbe/botbuilder-python | 4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup
REQUIRES = [
"botbuilder-schema>=4.7.1",
"botbuilder-core>=4.7.1",
"botbuilder-dialogs>=4.7.1",
]
TESTS_REQUIRES = ["aiounittest==1.3.0"]
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, "botbuilder", "testing", "about.py")) as f:
package_info = {}
info = f.read()
exec(info, package_info)
with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name=package_info["__title__"],
version=package_info["__version__"],
url=package_info["__uri__"],
author=package_info["__author__"],
description=package_info["__description__"],
keywords="botbuilder-testing bots ai testing botframework botbuilder",
long_description=long_description,
long_description_content_type="text/x-rst",
license=package_info["__license__"],
packages=["botbuilder.testing"],
install_requires=REQUIRES + TESTS_REQUIRES,
tests_require=TESTS_REQUIRES,
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3.7",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| 30.875 | 74 | 0.68556 |
b717fbac590d4840b6a79d910a698babc007a817 | 5,175 | py | Python | bot.py | otobraz/LLKEventsBot | 68e1ba3e5d64abe6e28e478a0631900d3962100f | [
"MIT"
] | null | null | null | bot.py | otobraz/LLKEventsBot | 68e1ba3e5d64abe6e28e478a0631900d3962100f | [
"MIT"
] | null | null | null | bot.py | otobraz/LLKEventsBot | 68e1ba3e5d64abe6e28e478a0631900d3962100f | [
"MIT"
] | null | null | null | # bot.py
import os
import sqlite3
import json
import datetime
from dotenv import load_dotenv
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from cogs.utils import helper as h
intents = discord.Intents.default()
intents.members = True
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
PREFIX = os.getenv('BOT_PREFIX')
dir_path = os.path.dirname(os.path.realpath(__file__))
extensions = ['cogs.general', 'cogs.events', 'cogs.moderation']
class LLKEventsBot(Bot):
def __init__(self):
super().__init__(
description="Bot created by Oto#2494",
command_prefix=PREFIX,
owner_id=271992863175344130,
intents=intents,
help_command=None
)
print('\nLoading embed data...')
try:
with open(f'{dir_path}/db/embed_id.json', 'r+') as f:
try:
self.embed_data = json.load(f)
if self.embed_data:
self.embed_id = self.embed_data['eventEmbed']['id']
except:
self.embed_data = {"eventEmbed":{
"id": None }}
self.embed_id = self.embed_data['eventEmbed']['id']
json.dump(self.embed_data, f, indent=4)
except:
            with open(f'{dir_path}/db/embed_id.json', 'w+') as f:
                self.embed_data = {"eventEmbed":{
                    "id": None
                }}
self.embed_id = self.embed_data['eventEmbed']['id']
json.dump(self.embed_data, f, indent=4)
print('Loading permissions data...')
try:
with open('db/roles.json', 'r+') as f:
try:
self.perms_data = json.load(f)
if self.perms_data:
self.perms = self.perms_data['permissions']
except Exception as e:
print(f'{e}')
except:
with open(f'{dir_path}/db/roles.json', 'w+') as f:
self.perms_data = {"permissions":{
"admins": [],
"mods": [],
"hosts": []
}}
self.perms = self.perms_data['permissions']
json.dump(self.perms_data, f, indent=4)
print('Loading roles DB...')
self.conn = sqlite3.connect(f'{dir_path}/db/events.db')
self.cursor = self.conn.cursor()
self.cursor.execute("""
CREATE TABLE IF NOT EXISTS events (
event_id STRING NOT NULL,
user_id STRING NOT NULL,
description STRING NOT NULL,
target STRING NOT NULL
)
""")
# print('Loading embed data...')
# try:
# with open('db/embed_id.json', 'r+') as f:
# try:
# self.embed_data = json.load(f)
# if self.embed_data:
# self.embed_id = self.embed_data['eventEmbed']['id']
# except Exception as e:
# print(f'{e}')
# except:
# open('db/embed_id.json', 'w+')
async def on_ready(self):
if not os.path.exists('db'):
os.makedirs('db')
if not os.path.exists('logs'):
os.makedirs('logs')
print('\nLoading extensions...')
for extension in extensions:
print(f'Loading {extension}')
bot.load_extension(extension)
await bot.change_presence(activity=discord.Game(f'{PREFIX}help'))
print(f'\nLogged in as: {bot.user.name} - {bot.user.id}\nVersion: {discord.__version__}\n')
# async def on_message(self, msg):
# if msg.author.bot:
# return
async def on_command_error(self, ctx, error):
if isinstance(error, commands.BotMissingPermissions):
await ctx.send(f'I have no permission to do that')
return
elif isinstance(error, commands.CheckFailure):
await ctx.send(f'You have no permission to use this command')
return
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(f'You forgot to inform the following parameter: {error.param}')
else:
d = datetime.datetime.now()
with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding='utf8') as f:
# f.write(f'''-------------\n{d.hour}:{d.minute}:{d.second}.{d.microsecond}\n{type(error)}\n{error}\n-------------\n\n'''')
f.write(
'-------------\n'
f'{d.hour}:{d.minute}:{d.second}.{d.microsecond}\n'
f'Command: {ctx.message.content}\n'
f'Author: {ctx.author}\n'
f'Exception: {type(error)}\n'
f'Description: {error}\n'
'-------------\n\n'
)
await ctx.send(f'It seems something went wrong:```{error}```')
return
bot = LLKEventsBot()
bot.run(TOKEN)
| 34.966216 | 139 | 0.508599 |
2d0ab933a7fc72ef74e24c1902a98429ff1fbca4 | 1,242 | py | Python | setup.py | elyashiv3839/openapi-schema-generator | 2bb05f88529c9d1f269896d254222ae2bf871b3c | [
"MIT"
] | null | null | null | setup.py | elyashiv3839/openapi-schema-generator | 2bb05f88529c9d1f269896d254222ae2bf871b3c | [
"MIT"
] | null | null | null | setup.py | elyashiv3839/openapi-schema-generator | 2bb05f88529c9d1f269896d254222ae2bf871b3c | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="openapi-schema-generator",
version="1.0.1",
author="Elyashiv Danino",
author_email="elyashiv3839@gmail.com",
description="Resolve schema and deploy to single schema",
long_description="file: README.md",
long_description_content_type="text/markdown",
url="https://github.com/elyashiv3839/openapi-schema-generator.git",
download_url="https://github.com/elyashiv3839/openapi-schema-generator/archive/refs/tags/1.0.1.tar.gz",
project_urls={"Bug Tracker": "https://github.com/elyashiv3839/openapi-schema-generator.git/issues"},
classifier=[
"Programming Language :: Python :: 3",
"Licence :: MIT",
"Operating System :: Multi-platform",
],
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=["attrs==21.2.0", "importlib-metadata==4.8.1", "isodate==0.6.0", "jsonschema==3.2.0",
"openapi-schema-validator==0.1.5", "pyrsistent==0.18.0", "PyYAML==5.4.1", "six==1.16.0",
"typing-extensions==3.10.0.2", "zipp==3.5.0",
"compare_objects==1.0.0"]
)
| 42.827586 | 110 | 0.641707 |
672c4ff1e07643a695463be4a592c33e40789ea1 | 6,669 | py | Python | autocertkit/storage_tests.py | TalonsLee/auto-cert-kit | 5f18de4136be8994d3ee26cf9b53a95cb8d608bc | [
"BSD-2-Clause"
] | null | null | null | autocertkit/storage_tests.py | TalonsLee/auto-cert-kit | 5f18de4136be8994d3ee26cf9b53a95cb8d608bc | [
"BSD-2-Clause"
] | null | null | null | autocertkit/storage_tests.py | TalonsLee/auto-cert-kit | 5f18de4136be8994d3ee26cf9b53a95cb8d608bc | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""A module for storage specific test cases"""
import testbase
from utils import *
class PerfTestClass(testbase.LocalStorageTestClass):
"""A somewhat generic test class for local storage
performance tests that could be expanded to include
additional plugin-based tasks"""
    # Define the test timeout in seconds and the number of test VMs
timeout = 3600
vm_count = 3
# SSH command variables
username = 'root'
password = DEFAULT_PASSWORD
# Class variables
test = ''
cmd_str = ''
def _setup_vms(self, session):
"""Creates vm_count VMs on the
master host's local SR"""
host_ref = get_pool_master(session)
net_ref = get_management_network(session)
if 'device_config' in self.config and 'sr' in self.config['device_config']:
sr_ref = self.config['device_config']['sr']
else:
log.debug("Local SR info is not available from device tag.")
log.debug("Choosing first local SR.")
sr_ref = get_local_sr(session, host_ref)
log.debug("%s is chosen for local storage test." % sr_ref)
return deploy_common_droid_vms_on_hosts(session,
[host_ref],
[net_ref],
self.vm_count,
{net_ref: self.get_static_manager(
net_ref)},
sr_ref)[host_ref]
def _call_plugin(self, session, vm_ref_list, call):
"""Util function to call ACK plugin method"""
res = []
for vm_ref in vm_ref_list:
res.append(call_ack_plugin(self.session, call,
{'vm_ref': vm_ref,
'mip': get_context_vm_mip(vm_ref),
'username': self.username,
'password': self.password}))
return res
def _create_test_threads(self, session, vm_ref_list):
"""Spawns a new test thread using the cmd_strin a
timeout function over SSH to every VM in vm_ref_list"""
threads = []
for vm_ref in vm_ref_list:
threads.append(create_test_thread(lambda vm=vm_ref: TimeoutFunction(ssh_command(get_context_vm_mip(vm),
self.username,
self.password,
self.cmd_str),
self.timeout, '%s test timed out %d' % (self.test, self.timeout))))
return threads
def _run_test(self, session):
"""Run test function"""
# setup vms
vm_ref_list = self._setup_vms(session)
# Make certain the VMs are available
for vm_ref in vm_ref_list:
check_vm_ping_response(session, vm_ref, get_context_vm_mip(vm_ref))
# deploy test rpms
self._call_plugin(session, vm_ref_list, 'deploy_' + self.test)
# create, start test threads, wait until complete
log.debug("About to run %s test..." % self.test)
threads = self._create_test_threads(session, vm_ref_list)
# Wait for the threads to finish running or timeout
start = time.time()
while check_test_thread_status(threads):
time.sleep(1)
if should_timeout(start, self.timeout):
raise Exception("%s test timed out %s" %
(self.test, self.timeout))
# retrieve the logs
log.debug("%s test is complete, retrieving logs" % self.test)
res = self._call_plugin(session, vm_ref_list,
'retrieve_' + self.test + '_logs')
return {'info': 'Test ran successfully'}
def test_iozone(self, session):
"""Perform the IOZone Local Storage benchmark"""
self.test = 'iozone'
self.cmd_str = '/usr/bin/iozone -r 4k -r 128k -r 1m -s 128m >> /root/localhost.log'
return self._run_test(session)
def test_bonnie(self, session):
"""Perform the Bonnie++ local storage benchmark"""
config = {'scratch_dir': '/root/bonnie',
'file_size': '2000',
'count': '1',
'user': 'citrix',
'log': '2>&1 | tee /root/localhost.log'}
self.test = 'bonnie'
self.cmd_str = 'bonnie++ -d %s -s %s -x %s -u %s %s' % (config['scratch_dir'],
config[
'file_size'],
config[
'count'],
config['user'],
config['log'])
return self._run_test(session)
| 44.165563 | 147 | 0.540561 |
3f82ca6f98282a9974ea5cbab7dae1b91cb7ca33 | 4,213 | py | Python | deepcell/datasets/phase.py | esgomezm/deepcell-tf | 6693c9ed7e76793561e6c2281437acaf3e4fa441 | [
"Apache-2.0"
] | null | null | null | deepcell/datasets/phase.py | esgomezm/deepcell-tf | 6693c9ed7e76793561e6c2281437acaf3e4fa441 | [
"Apache-2.0"
] | null | null | null | deepcell/datasets/phase.py | esgomezm/deepcell-tf | 6693c9ed7e76793561e6c2281437acaf3e4fa441 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2019 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Phase datasets including
the raw images and ground truth segmentation masks"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deepcell.datasets import Dataset
# pylint: disable=line-too-long
methods = {
'Cell culture':
'For our cytoplasmic data, NIH3t3 and Raw263.7 cell lines were '
'cultured in DMEM media supplemented with 10% FBS and 1x '
'penicillin-streptomycin antibiotic. Cells were incubated at 37C in a '
'humidified 5% CO2 atmosphere. When 70-80% confluent, cells were '
'passaged and seeded onto fibronectin coated glass bottom 96-well '
'plates at 10,000-20,000 cells/well. The seeded cells were then '
'incubated for 1-2 hours to allow for cell adhesion to the bottom of '
'the well plate before imaging.',
'Imaging':
'Cells were imaged on a Nikon Eclipse Ti-2 fluorescence microscope at '
'20x and 40x for NIH3t3 and Raw293.6 cells respectively. The well '
'plate was placed in a Nikon incubated stage with an Oko labs '
'environment controller set to 37C and 5% CO2. Each data set was '
'generated using the Nikon jobs function to collect a z-stack of phase '
'images.'
}
#:
all_cells = Dataset(
path='20190813_all_phase_512_contrast_adjusted_curated.npz',
url='https://deepcell-data.s3-us-west-1.amazonaws.com/cytoplasm/brightfield/20190813_all_phase_512_contrast_adjusted_curated.npz',
file_hash='20d6a99216e7c966182e79eef4eaf937',
metadata={'methods': methods}
)
#:
nih_3t3 = Dataset(
path='nih_3t3-phase.npz',
url='https://deepcell-data.s3.amazonaws.com/cytoplasm/brightfield/nih_3t3-phase_fixed.npz',
file_hash='d1a3b5a548300ef8389cee8021f53957',
metadata={'methods': methods}
)
#:
a549 = Dataset(
path='A549-phase.npz',
url='https://deepcell-data.s3.amazonaws.com/cytoplasm/brightfield/A549-phase_fixed.npz',
file_hash='d1820a7057079a774a9def8ae4634e74',
metadata={'methods': methods}
)
#:
cho = Dataset(
path='CHO-phase.npz',
url='https://deepcell-data.s3.amazonaws.com/cytoplasm/brightfield/CHO-phase_fixed.npz',
file_hash='0d059506a9500e155d0fbfee64c43e21',
metadata={'methods': methods}
)
#:
hela_s3 = Dataset(
path='HeLa_S3-phase.npz',
url='https://deepcell-data.s3.amazonaws.com/cytoplasm/brightfield/HeLa_S3-phase_fixed.npz',
file_hash='8ee318c32e41c9ff0fccf40bcd9d993d',
metadata={'methods': methods}
)
#:
hela = Dataset(
path='HeLa-phase.npz',
url='https://deepcell-data.s3.amazonaws.com/cytoplasm/brightfield/HeLa-phase_fixed.npz',
file_hash='f16c22201d63d1ab856f066811b3dcfa',
metadata={'methods': methods}
)
#:
pc3 = Dataset(
path='PC3-phase.npz',
url='https://deepcell-data.s3.amazonaws.com/cytoplasm/brightfield/PC3-phase_fixed.npz',
file_hash='1be17d9f6dbb009eed542f56f8282edd',
metadata={'methods': methods}
)
| 37.616071 | 134 | 0.721339 |
6aa8e32560d43404acb9adaa765f15e4f43db709 | 14,423 | py | Python | addons/hr_attendance/models/hr_employee.py | shdkej/odoo_gvm | 15b797e60a329f5d2fddb817a2b30a926b5873fa | [
"MIT"
] | null | null | null | addons/hr_attendance/models/hr_employee.py | shdkej/odoo_gvm | 15b797e60a329f5d2fddb817a2b30a926b5873fa | [
"MIT"
] | 3 | 2020-12-06T11:10:32.000Z | 2020-12-06T11:16:48.000Z | addons/hr_attendance/models/hr_employee.py | shdkej/odoo_gvm | 15b797e60a329f5d2fddb817a2b30a926b5873fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from random import choice
from string import digits
from dateutil.relativedelta import relativedelta
from odoo.exceptions import UserError
from odoo import models, fields, api, exceptions, _, SUPERUSER_ID
from datetime import datetime,timedelta
from odoo.http import content_disposition, dispatch_rpc, request
import requests
import logging
import sys
_logger = logging.getLogger(__name__)
class HrEmployee(models.Model):
_inherit = "hr.employee"
_description = "Employee"
def _default_random_pin(self):
return ("".join(choice(digits) for i in range(4)))
def _default_random_barcode(self):
barcode = None
while not barcode or self.env['hr.employee'].search([('barcode', '=', barcode)]):
barcode = "".join(choice(digits) for i in range(8))
return barcode
barcode = fields.Char(string="Badge ID", help="ID used for employee identification.", default=_default_random_barcode, copy=False)
pin = fields.Char(string="PIN", default=_default_random_pin, help="PIN used to Check In/Out in Kiosk Mode (if enabled in Configuration).", copy=False)
attendance_ids = fields.One2many('hr.attendance', 'employee_id', help='list of attendances for the employee')
last_attendance_id = fields.Many2one('hr.attendance', compute='_compute_last_attendance_id')
    attendance_state = fields.Selection(string="Attendance", selection=[('checked_out', "Checked out"), ('checked_in', "Checked in")], default='checked_out')
outing_state = fields.Selection(string="Attendance", selection=[('outing_out', "outing out"), ('outing_in', "outing in")])
manual_attendance = fields.Boolean(string='Manual Attendance', compute='_compute_manual_attendance', inverse='_inverse_manual_attendance',
help='The employee will have access to the "My Attendances" menu to check in and out from his session')
check_today_attendance = fields.Boolean()
location = fields.Char()
_sql_constraints = [('barcode_uniq', 'unique (barcode)', "The Badge ID must be unique, this one is already assigned to another employee.")]
@api.multi
def _compute_manual_attendance(self):
for employee in self:
employee.manual_attendance = employee.user_id.has_group('hr.group_hr_attendance') if employee.user_id else False
@api.multi
def _inverse_manual_attendance(self):
manual_attendance_group = self.env.ref('hr.group_hr_attendance')
for employee in self:
if employee.user_id:
if employee.manual_attendance:
manual_attendance_group.users = [(4, employee.user_id.id, 0)]
else:
manual_attendance_group.users = [(3, employee.user_id.id, 0)]
@api.depends('attendance_ids')
def _compute_last_attendance_id(self):
for employee in self:
employee.last_attendance_id = employee.attendance_ids and employee.attendance_ids[0] or False
@api.depends('last_attendance_id.check_in', 'last_attendance_id.check_out', 'last_attendance_id')
def _compute_attendance_state(self):
for employee in self:
#employee.attendance_state = employee.last_attendance_id and not employee.last_attendance_id.check_out and 'checked_in' or 'checked_out'
employee.attendance_state = employee.last_attendance_id and not employee.last_attendance_id.check_out and 'checked_in' or 'checked_out'
@api.constrains('pin')
def _verify_pin(self):
for employee in self:
if employee.pin and not employee.pin.isdigit():
raise exceptions.ValidationError(_("The PIN must be a sequence of digits."))
@api.model
def attendance_scan(self, barcode):
""" Receive a barcode scanned from the Kiosk Mode and change the attendances of corresponding employee.
Returns either an action or a warning.
"""
employee = self.search([('barcode', '=', barcode)], limit=1)
return employee and employee.attendance_action('hr_attendance.hr_attendance_action_kiosk_mode') or \
{'warning': _('No employee corresponding to barcode %(barcode)s') % {'barcode': barcode}}
@api.multi
def attendance_manual(self, next_action, address, entered_pin=None):
self.ensure_one()
if not (entered_pin is None) or self.env['res.users'].browse(SUPERUSER_ID).has_group('hr_attendance.group_hr_attendance_use_pin') and (self.user_id and self.user_id.id != self._uid or not self.user_id):
if entered_pin != self.pin:
return {'warning': _('Wrong PIN')}
return self.attendance_action(next_action, address)
@api.multi
def attendance_action(self, next_action, location):
""" Changes the attendance of the employee.
Returns an action to the check in/out message,
next_action defines which menu the check in/out message should return to. ("My Attendances" or "Kiosk Mode")
"""
self.ensure_one()
action_message = self.env.ref('hr_attendance.hr_attendance_action_greeting_message').read()[0]
action_message['previous_attendance_change_date'] = self.last_attendance_id and (self.last_attendance_id.check_out or self.last_attendance_id.check_in) or False
action_message['employee_name'] = self.name
action_message['next_action'] = next_action
if self.user_id:
modified_attendance = self.sudo(self.user_id.id).attendance_action_change(location)
else:
modified_attendance = self.sudo().attendance_action_change()
action_message['attendance'] = modified_attendance.read()[0]
return {'action': action_message}
def write_outing_list(ids,destination,reason,date_to,date_from,location):
hr_employee = request.env['hr.employee'].search([('user_id','=',request.env.uid)])
        # Outing (off-site work): record the start of an outing
if hr_employee.outing_state == 'outing_out':
date_to = str(date_to)
            date_from = str(date_from)
date = date_to + " ~ " + date_from
if reason == '':
raise UserError (_('구체적인사유를 입력하세요'))
elif destination == '':
raise UserError (_('외근 목적지를 입력하세요'))
else:
hr_attendance = request.env['hr.attendance']
hr_attendance.create({'employee_id':hr_employee.id,
'reason':reason,
'destination':destination,
'outing_start':datetime.today(),
'date':date,
})
hr_employee.write({'outing_state':'outing_in',
'attendance_state':'checked_out',
})
        # Return from outing / check-out: close the open outing record
else:
_logger.warning('test')
attendance = request.env['hr.attendance'].search([('employee_id', '=', hr_employee.id), ('outing_end', '=', False), ('outing_start', '!=', False)], limit=1)
attendance.write({'outing_end':datetime.today(),
'outing_place':location
})
hr_employee.write({'outing_state':'outing_out',
'attendance_state':'checked_out',
})
def _Check_out_time(self):
""" 퇴근을 하지 않았을경우, 다음날 00시 00분 00초에 자동으로 퇴근이 된다."""
#현재시간
check_out_time = datetime.now()
#00시 00분 00초
#한국과의 시차는9 시간
out_time = check_out_time.replace(hour=15, minute=0,second=0)
#체크아웃 정보 가져오기
hr_attendance = self.env['hr.attendance'].search([('check_out', '=', False), ('check_in', '!=', False)])
attendance = request.env['hr.attendance'].search([('outing_end', '=', False), ('outing_start', '!=', False)])
hr_employee = request.env['hr.employee'].search([('user_id','=',request.env.uid)])
#출장을 작성
for att in hr_attendance:
att.write({
'check_out':out_time,
})
        # Close the open outing records (write outing_end)
for att in attendance:
att.write({
'outing_end':out_time,
})
hr_employee.write({'outing_state':'outing_out',
})
@api.multi
def attendance_action_change(self, location):
""" Check In/Check Out action
Check In: create a new attendance record
Check Out: modify check_out field of appropriate attendance record
"""
if len(self) > 1:
raise exceptions.UserError(_('Cannot perform check in or check out on multiple employees.'))
        # Current time
        present_date = datetime.now()
        # Look up this user's previous check-in time
        hr_attendance = self.env['hr.attendance'].search([('employee_id', '=', self.id)], limit=1)
        check_in_date = hr_attendance.check_in
        # Initialise the check-in/check-out cutoff times
check_in_cut_line = ""
check_out_cut_line = ""
find_1 = "China"
find_2 = "Vietnam"
location = "6X4Q+22 Trần Xá, Yên Phong, Bắc Ninh, Vietnam"
China = location.find(find_1)
Vietnam = location.find(find_2)
if China != -1:
present_date = present_date - timedelta(hours=1)
_logger.warning("date%s"%present_date)
elif Vietnam != -1:
present_date = present_date - timedelta(hours=2)
_logger.warning("date2%s"%present_date)
        # If the user has a previous check-in time
        if check_in_date != False:
            # Parse the user's previous check-in time (year, month, day, hour, minute, second)
            check_in_last_time = datetime.strptime(check_in_date, '%Y-%m-%d %H:%M:%S')
            # Check-in cutoff time (00:00:00 of the next day)
            # hour=15 is used because the server runs on US time, 9 hours apart from Korea
            check_in_cut_line = check_in_last_time
            check_in_cut_line = check_in_cut_line.replace(hour=15, minute=0,second=0)
            # Check-out cutoff time (1 hour later)
            check_out_cut_line = check_in_last_time + relativedelta(hours=1)
        # Check-in case
_logger.warning(self.attendance_state)
if self.attendance_state != 'checked_in':
            # If the user has an earlier check-in record
            if check_in_date != False:
                # If the current time is later than the check-in cutoff
                if present_date > check_in_cut_line:
                    # Create the attendance record
vals = {
'employee_id': self.id,
'check_in': present_date,
'check_in_place': location,
}
self.attendance_state = 'checked_in'
return self.env['hr.attendance'].create(vals)
                # If the current time is earlier than the check-in cutoff
                else:
                    raise UserError(_('출근시간이 아닙니다.'))
            # If the user has no earlier attendance record
            else:
                # Create the attendance record
vals = {
'employee_id': self.id,
'check_in': present_date,
'check_in_place': location,
}
self.attendance_state = 'checked_in'
return self.env['hr.attendance'].create(vals)
        # Check-out case
        else:
            hr_attendance = self.env['hr.attendance']
            # Find the user's open attendance record (not yet checked out)
            attendance = self.env['hr.attendance'].search([('employee_id', '=', self.id), ('check_out', '=', False), ('check_in', '!=', False)], limit=1)
            _logger.warning(attendance)
            if attendance:
                # If the current time is later than the check-out cutoff
                if present_date > check_out_cut_line:
                    # Write the check-out time on the record
attendance.check_out = present_date
attendance.check_out_place = location
self.attendance_state = 'checked_out'
                # If the current time is earlier than the check-out cutoff
else:
raise UserError(_('퇴근시간이 아닙니다.'))
return attendance
@api.model_cr_context
def _init_column(self, column_name):
""" Initialize the value of the given column for existing rows.
Overridden here because we need to have different default values
for barcode and pin for every employee.
"""
if column_name not in ["barcode", "pin"]:
super(HrEmployee, self)._init_column(column_name)
else:
default_compute = self._fields[column_name].default
query = 'SELECT id FROM "%s" WHERE "%s" is NULL' % (
self._table, column_name)
self.env.cr.execute(query)
employee_ids = self.env.cr.fetchall()
for employee_id in employee_ids:
default_value = default_compute(self)
query = 'UPDATE "%s" SET "%s"=%%s WHERE id = %s' % (
self._table, column_name, employee_id[0])
self.env.cr.execute(query, (default_value,))
def google_geocode(self, location):
url = 'https://maps.googleapis.com/maps/api/geocode/json?key=AIzaSyB99SRIPe6V5HCvbhf9rzaEbi8E2jP_1Zg&latlng=' + str(location[0]) + ',' + str(location[1])
r = requests.get(url).json()
address = r['plus_code']['compound_code']
global_address = address
return address
def naver_geocode(self, location):
reload(sys)
sys.setdefaultencoding('utf-8')
url = 'https://naveropenapi.apigw.ntruss.com/map-reversegeocode/v2/gc?coords=' + str(location[1]) + ',' + str(location[0]) + '&output=json&orders=legalcode,roadaddr'
headers = {
'X-NCP-APIGW-API-KEY-ID':'39295gvivi',
'X-NCP-APIGW-API-KEY':'vmc51DB35kxwYIW8BivXpZwMhJTKMKzYm9VUcHoP'
}
r = requests.get(url, headers=headers).json()
address = r
full_address = ''
address = r
status = address['status']['name']
if status == 'ok':
name4 = str(address['results'][0]['region']['area4']['name'])
name3 = str(address['results'][0]['region']['area3']['name'])
name2 = str(address['results'][0]['region']['area2']['name'])
name1 = str(address['results'][0]['region']['area1']['name'])
name0 = str(address['results'][0]['region']['area0']['name'])
roadaddr0, roadaddr1 = '',''
if len(address['results']) > 1:
roadaddr0 = str(address['results'][1]['land']['name'])
roadaddr1 = str(address['results'][1]['land']['number1'])
full_address = name1 + name2 + name3 + name4 + roadaddr0 + roadaddr1
else:
full_address = self.google_geocode(location)
return full_address
def geolocation(self):
url = 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyB99SRIPe6V5HCvbhf9rzaEbi8E2jP_1Zg'
r = requests.post(url).json()
lat = str(r['location']['lat'])
lng = str(r['location']['lng'])
latlng = [lat,lng]
return latlng
| 38.156085 | 210 | 0.622201 |
57de09fce0c12327766afd21495f5c6bbe34c79d | 1,635 | py | Python | setup.py | vladsaveliev/ovirage | fb5582101c7698f30145f1a810228df012055eab | [
"MIT"
] | 5 | 2019-03-05T03:40:00.000Z | 2020-04-08T05:22:45.000Z | setup.py | vladsaveliev/ovirage | fb5582101c7698f30145f1a810228df012055eab | [
"MIT"
] | 2 | 2020-11-23T09:57:31.000Z | 2020-11-23T10:56:48.000Z | setup.py | vladsaveliev/ovirage | fb5582101c7698f30145f1a810228df012055eab | [
"MIT"
] | 1 | 2018-09-20T21:10:01.000Z | 2018-09-20T21:10:01.000Z | #!/usr/bin/env python
import os
from os.path import join
from setuptools import setup
pkg = 'oviraptor'
try:
import versionpy
except ImportError:
res = input('Installation requires versionpy. Install it now? [Y/n]')
if res.lower().startswith('n'):
raise
os.system('pip install versionpy')
import versionpy
version = versionpy.get_version(pkg)
package_data = {
pkg: versionpy.find_package_files('', pkg)
}
setup(
name=pkg,
script_name=pkg,
version=version,
author='Vlad Savelyev',
author_email='vladislav.sav@gmail.com',
description='Oncoviral integration in cancer whole genome data',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url=f'https://github.com/umccr/{pkg}',
license='GPLv3',
packages=[pkg],
package_data=package_data,
include_package_data=True,
zip_safe=False,
install_requires=[
'versionpy',
'click',
'ngs_utils',
'numpy' # for lumpy extractSplitReads_BwaMem
],
scripts=[
join('scripts', 'oviraptor'),
],
keywords='bioinformatics',
classifiers=[
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
)
| 27.25 | 75 | 0.640367 |
b40aa563ee7a201dc7b81d2741d0f1549c957cfc | 13,022 | py | Python | test/unit/test_contracts_graph_compiled.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | [
"Apache-2.0"
] | null | null | null | test/unit/test_contracts_graph_compiled.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | [
"Apache-2.0"
] | null | null | null | test/unit/test_contracts_graph_compiled.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | [
"Apache-2.0"
] | null | null | null | import pickle
from dbt.contracts.graph.compiled import (
CompiledModelNode, InjectedCTE, CompiledSchemaTestNode
)
from dbt.contracts.graph.parsed import (
DependsOn, NodeConfig, TestConfig, TestMetadata
)
from dbt.node_types import NodeType
from .utils import ContractTestCase
class TestCompiledModelNode(ContractTestCase):
ContractType = CompiledModelNode
def _minimum(self):
return {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'bar',
'compiled': False,
}
def test_basic_uncompiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'deferred': False,
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
},
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': False,
'extra_ctes': [],
'extra_ctes_injected': False,
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Model,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
deferred=False,
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=NodeConfig(),
meta={},
compiled=False,
extra_ctes=[],
extra_ctes_injected=False,
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertTrue(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
minimum = self._minimum()
self.assert_from_dict(node, minimum)
pickle.loads(pickle.dumps(node))
def test_basic_compiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from {{ ref("other") }}',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'deferred': True,
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
},
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': True,
'compiled_sql': 'select * from whatever',
'extra_ctes': [{'id': 'whatever', 'sql': 'select * from other'}],
'extra_ctes_injected': True,
'injected_sql': 'with whatever as (select * from other) select * from whatever',
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from {{ ref("other") }}',
name='foo',
resource_type=NodeType.Model,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
deferred=True,
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=NodeConfig(),
meta={},
compiled=True,
compiled_sql='select * from whatever',
extra_ctes=[InjectedCTE('whatever', 'select * from other')],
extra_ctes_injected=True,
injected_sql='with whatever as (select * from other) select * from whatever',
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertTrue(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
def test_invalid_extra_fields(self):
bad_extra = self._minimum()
bad_extra['notvalid'] = 'nope'
self.assert_fails_validation(bad_extra)
def test_invalid_bad_type(self):
bad_type = self._minimum()
bad_type['resource_type'] = str(NodeType.Macro)
self.assert_fails_validation(bad_type)
class TestCompiledSchemaTestNode(ContractTestCase):
ContractType = CompiledSchemaTestNode
def _minimum(self):
return {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'bar',
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
'compiled': False,
}
def test_basic_uncompiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
'severity': 'ERROR',
},
'deferred': False,
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': False,
'extra_ctes': [],
'extra_ctes_injected': False,
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Test,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
deferred=False,
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=TestConfig(),
meta={},
compiled=False,
extra_ctes=[],
extra_ctes_injected=False,
test_metadata=TestMetadata(namespace=None, name='foo', kwargs={}),
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertFalse(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
minimum = self._minimum()
self.assert_from_dict(node, minimum)
pickle.loads(pickle.dumps(node))
def test_basic_compiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from {{ ref("other") }}',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'deferred': False,
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
'severity': 'warn',
},
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': True,
'compiled_sql': 'select * from whatever',
'extra_ctes': [{'id': 'whatever', 'sql': 'select * from other'}],
'extra_ctes_injected': True,
'injected_sql': 'with whatever as (select * from other) select * from whatever',
'column_name': 'id',
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from {{ ref("other") }}',
name='foo',
resource_type=NodeType.Test,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
deferred=False,
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=TestConfig(severity='warn'),
meta={},
compiled=True,
compiled_sql='select * from whatever',
extra_ctes=[InjectedCTE('whatever', 'select * from other')],
extra_ctes_injected=True,
injected_sql='with whatever as (select * from other) select * from whatever',
column_name='id',
test_metadata=TestMetadata(namespace=None, name='foo', kwargs={}),
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertFalse(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
def test_invalid_extra_fields(self):
bad_extra = self._minimum()
bad_extra['extra'] = 'extra value'
self.assert_fails_validation(bad_extra)
def test_invalid_resource_type(self):
bad_type = self._minimum()
bad_type['resource_type'] = str(NodeType.Model)
self.assert_fails_validation(bad_type)
| 33.823377 | 92 | 0.465597 |
d4cb6558eb6a5a755f25991d1dfdb3d5500bb0ad | 2,890 | py | Python | punctuation/parser/gutenberg_parser.py | alex-darmon/punctuation-stylometry | 8e7c60b6175f6f0ef44f77836bb9f4f43241e205 | [
"MIT"
] | 1 | 2022-01-30T13:15:10.000Z | 2022-01-30T13:15:10.000Z | punctuation/parser/gutenberg_parser.py | alex-darmon/punctuation-stylometry | 8e7c60b6175f6f0ef44f77836bb9f4f43241e205 | [
"MIT"
] | null | null | null | punctuation/parser/gutenberg_parser.py | alex-darmon/punctuation-stylometry | 8e7c60b6175f6f0ef44f77836bb9f4f43241e205 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 10 15:46:46 2019
@author: alexandradarmon
"""
import logging
import numpy as np
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
from punctuation.utils.utils import chunks
from punctuation.parser.punctuation_parser import get_textinfo
import spacy
from multiprocessing import Pool, cpu_count
#from threading import Thread
import timeout_decorator
spacy.load('en_core_web_sm')
logger = logging.getLogger(__name__)
#@logging_function(logger)
def ranks_of_freq(freq):
return np.array(freq).argsort()[::-1].tolist()
#@logging_function(logger)
@timeout_decorator.timeout(40, use_signals=False)
def get_gutenberg_text(book_id):
"""
This function gets the text corresponding to the book_id
from Gutenberg database.
"""
try:
x = strip_headers(load_etext(int(book_id), prefer_ascii=False)).strip()
except:
x = None
return x
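# Illustrative usage sketch (comments only, not part of the original module); `book_id`
# stands for any numeric Project Gutenberg id:
#
#   text = get_gutenberg_text(book_id)        # returns None (or times out) on failure
#   if text is not None:
#       tokens = get_textinfo(text)           # punctuation tokens for this text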
def get_gutenberg_texts(book_ids):
"""
This function gets the texts corresponding to the list of book_ids
from Gutenberg database.
"""
list_texts = []
for book_id in book_ids:
list_texts.append(get_gutenberg_text(book_id))
return list_texts
def get_gutenberg_texts_tokens(list_book_ids):
"""
This function gets the texts corresponding to the list of book_ids
from Gutenberg database.
"""
list_texts = []
list_tokens = []
for book_id in list_book_ids:
try:
text = get_gutenberg_text(book_id)
except:
text = None
print('Timed out. Could not find: {}'.format(book_id))
list_texts.append(text)
tokens = get_textinfo(text)
list_tokens.append(tokens)
return list_texts, list_tokens
#@logging_function(logger)
def get_gutenberg_texts_pool(list_book_ids):
total_threads = cpu_count()
chunk_size = int(len(list_book_ids) / total_threads) + 1
sets_to_be_computed = chunks(list_book_ids, chunk_size)
pool = Pool(total_threads)
results = pool.map(get_gutenberg_texts, sets_to_be_computed)
# results = Thread(get_gutenberg_texts, sets_to_be_computed)
l = []
for l_res in results:
l = l+l_res
pool.close()
pool.join()
return l
def get_gutenberg_text_tokens_pool(list_book_ids, save_text=False):
total_threads = cpu_count()
chunk_size = int(len(list_book_ids) / total_threads) + 1
sets_to_be_computed = chunks(list_book_ids, chunk_size)
pool = Pool(total_threads)
results = pool.map(get_gutenberg_texts_tokens, sets_to_be_computed)
# results = Thread(get_gutenberg_texts_tokens, sets_to_be_computed)
l_tokens = []
l_text = []
for l_res in results:
if save_text: l_text = l_text+l_res[0]
l_tokens = l_tokens+l_res[1]
pool.close()
pool.join()
return l_text, l_tokens | 29.191919 | 79 | 0.707612 |
716785574bf590d161bb51e60b2f8e160118a7b8 | 5,926 | py | Python | question_generation/generate_0hop_question.py | FangxiangFeng/clevr-dataset-gen | 80bbcadeba7ed8d0e626a3d20ef949a0bb00e4f5 | [
"BSD-3-Clause"
] | null | null | null | question_generation/generate_0hop_question.py | FangxiangFeng/clevr-dataset-gen | 80bbcadeba7ed8d0e626a3d20ef949a0bb00e4f5 | [
"BSD-3-Clause"
] | null | null | null | question_generation/generate_0hop_question.py | FangxiangFeng/clevr-dataset-gen | 80bbcadeba7ed8d0e626a3d20ef949a0bb00e4f5 | [
"BSD-3-Clause"
] | null | null | null | #coding:utf-8
from __future__ import print_function
import argparse, json, os, itertools, random, shutil
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_scene_1hop_dir', default='../output/test/moformat_scenes_1hop',
help="")
parser.add_argument('--output_scene_0hop_dir', default='../output/test/moformat_scenes_0hop',
help="JSON file containing metadata about functions")
parser.add_argument('--output_scene_01hop_dir', default='../output/test/moformat_scenes_01hop',
help="JSON file containing metadata about functions")
def update_qa(input_scene_1hop_file, output_scene_0hop_file, output_scene_01hop_file, question_template, synonyms):
scene = json.load(open(input_scene_1hop_file))
# query_param_types
# type: Color
# name: <C>
questions_1hop = scene['questions']
answers_1hop = scene['answer']
questions_0hop = []
answers_0hop = []
for obj in scene['objects']:
# generate questions for each object
for template in question_template:
param_type_to_name = {p['type']:p['name'] for p in template['params']}
# random choose question: question_text
question_text = random.choice(template['text'])
# determine the query_param_types
query_param_names = set([template['constraints'][0]['params'][0]])
# determine the color|shape|: replaceable_param_types
# for param_type,name in param_type_to_name_list:
replaceable_param_names = set(['<Z>', '<C>', '<M>', '<S>']) - query_param_names
replaceable_param_names = list(replaceable_param_names)
random.shuffle(replaceable_param_names)
replaceable_param_names = set(replaceable_param_names[:2])
for param_type in ['Size', 'Color', 'Material', 'Shape']:
name = param_type_to_name[param_type]
# print (replaceable_param_names)
if name in replaceable_param_names:
question_text = question_text.replace(name, obj[param_type.lower()])
else:
if name == '<S>':
question_text = question_text.replace(name, 'thing')
else:
question_text = question_text.replace(name, '')
if name in query_param_names:
answer = obj[param_type.lower()]
question_text = ' '.join(question_text.split())
for synk in synonyms.keys():
synv = random.choice(synonyms[synk])
question_text = question_text.replace(synk, synv)
questions_0hop.append(question_text)
answers_0hop.append(answer)
scene['questions'] = questions_0hop
scene['answer'] = answers_0hop
with open(output_scene_0hop_file, 'w') as fw:
json.dump(scene, fw, indent=2)
scene['questions'] = questions_0hop + questions_1hop
scene['answer'] = answers_0hop + answers_1hop
with open(output_scene_01hop_file, 'w') as fw:
json.dump(scene, fw, indent=2)
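# Sketch of the template structure assumed by update_qa()/update_qa4(), inferred only
# from the fields accessed above; the example question text is illustrative and the
# real zero_hop.json contents may differ:
#
#   {
#     "text": ["What <Z> is the <C> <M> <S>?", "..."],
#     "params": [{"type": "Size", "name": "<Z>"}, {"type": "Color", "name": "<C>"},
#                {"type": "Material", "name": "<M>"}, {"type": "Shape", "name": "<S>"}],
#     "constraints": [{"params": ["<Z>"]}]   # first constraint names the queried attribute
#   }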
def update_qa4(scene_file, output_scene_file, question_template, synonyms):
scene = json.load(open(scene_file))
# query_param_types
# type: Color
# name: <C>
questions = scene['questions']
answers = scene['answer']
for obj in scene['objects']:
# generate questions for each object
template = random.choice(question_template)
param_type_to_name = {p['type']:p['name'] for p in template['params']}
# random choose question: question_text
question_text = random.choice(template['text'])
# determine the query_param_types
query_param_names = set([template['constraints'][0]['params'][0]])
# determine the color|shape|: replaceable_param_types
# for param_type,name in param_type_to_name_list:
replaceable_param_names = set(['<Z>', '<C>', '<M>', '<S>']) - query_param_names
for param_type in ['Size', 'Color', 'Material', 'Shape']:
name = param_type_to_name[param_type]
# print (replaceable_param_names)
if name in replaceable_param_names:
question_text = question_text.replace(name, obj[param_type.lower()])
else:
if name == '<S>':
question_text = question_text.replace(name, 'thing')
else:
question_text = question_text.replace(name, '')
if name in query_param_names:
answer = obj[param_type.lower()]
question_text = ' '.join(question_text.split())
for synk in synonyms.keys():
synv = random.choice(synonyms[synk])
question_text = question_text.replace(synk, synv)
questions.append(question_text)
answers.append(answer)
scene['questions'] = questions
scene['answer'] = answers
with open(output_scene_file, 'w') as fw:
json.dump(scene, fw, indent=2)
if __name__ == '__main__':
args = parser.parse_args()
synonyms_json = 'synonyms.json'
with open(synonyms_json, 'r') as f:
synonyms = json.load(f)
question_template_file = 'zerohop_templates/zero_hop.json'
with open(question_template_file, 'r') as f:
question_template = json.load(f)
scene_dir = args.input_scene_1hop_dir
os.system('mkdir -p ' + args.output_scene_0hop_dir)
os.system('mkdir -p ' + args.output_scene_01hop_dir)
for filename in os.listdir(scene_dir):
input_scene_1hop_file = os.path.join(scene_dir, filename)
output_scene_0hop_file = os.path.join(args.output_scene_0hop_dir, filename)
output_scene_01hop_file = os.path.join(args.output_scene_01hop_dir, filename)
update_qa(input_scene_1hop_file, output_scene_0hop_file, output_scene_01hop_file, question_template, synonyms)
| 44.556391 | 122 | 0.640061 |
af230336d6d2a0ac487ca190b1b0f4b8fb642496 | 2,554 | bzl | Python | proto/repositories.bzl | yesudeep/rules_rust | cdca6ed5eb9609cbc3f6b5d8f760e46e3b0596a2 | [
"Apache-2.0"
] | 349 | 2016-03-15T20:38:00.000Z | 2022-03-28T07:03:02.000Z | proto/repositories.bzl | yesudeep/rules_rust | cdca6ed5eb9609cbc3f6b5d8f760e46e3b0596a2 | [
"Apache-2.0"
] | 872 | 2016-03-18T06:40:26.000Z | 2022-03-31T16:04:04.000Z | proto/repositories.bzl | yesudeep/rules_rust | cdca6ed5eb9609cbc3f6b5d8f760e46e3b0596a2 | [
"Apache-2.0"
] | 231 | 2016-03-16T11:34:47.000Z | 2022-03-25T23:01:35.000Z | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# buildifier: disable=module-docstring
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("//proto/raze:crates.bzl", "rules_rust_proto_fetch_remote_crates")
# buildifier: disable=unnamed-macro
def rust_proto_repositories(register_default_toolchain = True):
"""Declare dependencies needed for proto compilation.
Args:
register_default_toolchain (bool, optional): If True, the default [rust_proto_toolchain](#rust_proto_toolchain)
(`@rules_rust//proto:default-proto-toolchain`) is registered. This toolchain requires a set of dependencies
that were generated using [cargo raze](https://github.com/google/cargo-raze). These will also be loaded.
"""
maybe(
http_archive,
name = "rules_proto",
sha256 = "66bfdf8782796239d3875d37e7de19b1d94301e8972b3cbd2446b332429b4df1",
strip_prefix = "rules_proto-4.0.0",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0.tar.gz",
"https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0.tar.gz",
],
)
maybe(
http_archive,
name = "com_google_protobuf",
sha256 = "758249b537abba2f21ebc2d02555bf080917f0f2f88f4cbe2903e0e28c4187ed",
strip_prefix = "protobuf-3.10.0",
urls = [
"https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v3.10.0.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/v3.10.0.tar.gz",
],
patch_args = ["-p1"],
patches = [
Label("//proto/patches:com_google_protobuf-v3.10.0-bzl_visibility.patch"),
],
)
rules_rust_proto_fetch_remote_crates()
# Register toolchains
if register_default_toolchain:
native.register_toolchains(str(Label("//proto:default-proto-toolchain")))
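# Illustrative WORKSPACE usage (a sketch, not taken from this file). The load label is
# assumed from this file's location inside rules_rust:
#
#   load("@rules_rust//proto:repositories.bzl", "rust_proto_repositories")
#   rust_proto_repositories()   # or rust_proto_repositories(register_default_toolchain = False)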
| 42.566667 | 119 | 0.701644 |
c76291a947fcd801ad51eb32ddb47272c630d9d8 | 1,006 | py | Python | app/serializers.py | xyx-smu/MIS_server | 066a3df061bdf7aa5d771530666eff3eb9824d1b | [
"Apache-2.0"
] | null | null | null | app/serializers.py | xyx-smu/MIS_server | 066a3df061bdf7aa5d771530666eff3eb9824d1b | [
"Apache-2.0"
] | null | null | null | app/serializers.py | xyx-smu/MIS_server | 066a3df061bdf7aa5d771530666eff3eb9824d1b | [
"Apache-2.0"
] | null | null | null | from jwt import decode
from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer, TokenVerifySerializer
from MIS_server import settings
from app.models import EmailVerifyRecord
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
data['username'] = self.user.username
data['real_name'] = self.user.real_name
data['user_id'] = self.user.id
return data
class MyTokenVerifySerializer(TokenVerifySerializer):
def validate(self, attrs):
"""
        attrs['token']: the token sent with the request
        settings.SECRET_KEY: the default key from settings.py, unless it was changed in the configuration file
        algorithms: the algorithm(s) used to decode/verify the token
"""
decoded_data = decode(attrs['token'], settings.SECRET_KEY, algorithms=["HS256"])
return decoded_data
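# Illustrative wiring (a sketch, not part of this module): the custom serializers are
# usually attached by subclassing the simplejwt views; how this project actually wires
# them into its URLs is an assumption here.
#
#   from rest_framework_simplejwt.views import TokenObtainPairView, TokenVerifyView
#
#   class MyTokenObtainPairView(TokenObtainPairView):
#       serializer_class = MyTokenObtainPairSerializer
#
#   class MyTokenVerifyView(TokenVerifyView):
#       serializer_class = MyTokenVerifySerializer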
| 28.742857 | 97 | 0.699801 |
e94670d0b417b0487a68259cd9ce1910cf1e5680 | 3,018 | py | Python | azure-mgmt-monitor/azure/mgmt/monitor/models/threshold_rule_condition.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2018-11-09T06:16:34.000Z | 2018-11-09T06:16:34.000Z | azure-mgmt-monitor/azure/mgmt/monitor/models/threshold_rule_condition.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | null | null | null | azure-mgmt-monitor/azure/mgmt/monitor/models/threshold_rule_condition.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2018-11-09T06:17:41.000Z | 2018-11-09T06:17:41.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .rule_condition import RuleCondition
class ThresholdRuleCondition(RuleCondition):
"""A rule condition based on a metric crossing a threshold.
:param odatatype: Polymorphic Discriminator
:type odatatype: str
:param data_source: the resource from which the rule collects its data.
For this type dataSource will always be of type RuleMetricDataSource.
:type data_source: :class:`RuleDataSource
<azure.mgmt.monitor.models.RuleDataSource>`
:param operator: the operator used to compare the data and the threshold.
Possible values include: 'GreaterThan', 'GreaterThanOrEqual', 'LessThan',
'LessThanOrEqual'
:type operator: str or :class:`ConditionOperator
<azure.mgmt.monitor.models.ConditionOperator>`
:param threshold: the threshold value that activates the alert.
:type threshold: float
:param window_size: the period of time (in ISO 8601 duration format) that
is used to monitor alert activity based on the threshold. If specified
then it must be between 5 minutes and 1 day.
:type window_size: timedelta
:param time_aggregation: the time aggregation operator. How the data that
are collected should be combined over time. The default value is the
PrimaryAggregationType of the Metric. Possible values include: 'Average',
'Minimum', 'Maximum', 'Total', 'Last'
:type time_aggregation: str or :class:`TimeAggregationOperator
<azure.mgmt.monitor.models.TimeAggregationOperator>`
"""
_validation = {
'odatatype': {'required': True},
'operator': {'required': True},
'threshold': {'required': True},
}
_attribute_map = {
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
'data_source': {'key': 'dataSource', 'type': 'RuleDataSource'},
'operator': {'key': 'operator', 'type': 'ConditionOperator'},
'threshold': {'key': 'threshold', 'type': 'float'},
'window_size': {'key': 'windowSize', 'type': 'duration'},
'time_aggregation': {'key': 'timeAggregation', 'type': 'TimeAggregationOperator'},
}
def __init__(self, operator, threshold, data_source=None, window_size=None, time_aggregation=None):
super(ThresholdRuleCondition, self).__init__()
self.data_source = data_source
self.operator = operator
self.threshold = threshold
self.window_size = window_size
self.time_aggregation = time_aggregation
self.odatatype = 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'
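# Illustrative construction (a sketch, not part of the generated SDK code); values are
# placeholders, and the optional data_source is omitted because its constructor is not
# shown in this file:
#
#   import datetime
#   condition = ThresholdRuleCondition(
#       operator='GreaterThan',
#       threshold=80.0,
#       window_size=datetime.timedelta(minutes=15),
#       time_aggregation='Average',
#   )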
| 45.727273 | 103 | 0.666335 |
7f88ceba7742d59b66cc9aa02206b7f9ccf8a5f4 | 8,047 | py | Python | custom_layers.py | Arsey/Progressive-Growing-of-GANs | dccbd3b14887a311241f5dc6c1a9d55d56aeed7a | [
"MIT"
] | 2 | 2018-10-31T12:32:14.000Z | 2021-03-15T09:11:38.000Z | custom_layers.py | Arsey/Progressive-Growing-of-GANs | dccbd3b14887a311241f5dc6c1a9d55d56aeed7a | [
"MIT"
] | null | null | null | custom_layers.py | Arsey/Progressive-Growing-of-GANs | dccbd3b14887a311241f5dc6c1a9d55d56aeed7a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.nn.init import kaiming_normal, calculate_gain
from PIL import Image
import numpy as np
import copy
__author__ = 'Rahul Bhalley'
class ConcatTable(nn.Module):
'''Concatination of two layers into vector
'''
def __init__(self, layer1, layer2):
super(ConcatTable, self).__init__()
self.layer1 = layer1
self.layer2 = layer2
def forward(self, x):
return [self.layer1(x), self.layer2(x)]
class Flatten(nn.Module):
'''Flattens the convolution layer
'''
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
class FadeInLayer(nn.Module):
'''The layer fades in to the network with `alpha` value slowing entering in to existence
'''
def __init__(self, config):
super(FadeInLayer, self).__init__()
self.alpha = 0.0
def update_alpha(self, delta):
self.alpha = self.alpha + delta
self.alpha = max(0, min(self.alpha, 1.0))
# input `x` to `forward()` is output from `ConcatTable()`
def forward(self, x):
# `x[0]` is `prev_block` output faded out of existence with 1.0 - `alpha`
# `x[1]` is `next_block` output faded in to existence with `alpha`
# This is becasue `alpha` increases linearly
# Both `x[0]` and `x[1]` outputs 3-dim tensor (last block is `to_rgb_block`)
# So `add()` can work effectively and produce one weighted output
return torch.add(x[0].mul(1.0 - self.alpha), x[1].mul(self.alpha)) # outputs one value
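# Illustrative composition (a sketch, not taken from the original network code);
# `prev_block` and `next_block` are placeholder modules for the old and the newly
# grown resolution branch:
#
#   fade_in = nn.Sequential(
#       ConcatTable(prev_block, next_block),   # -> [old output, new output]
#       FadeInLayer(config=None),              # -> (1 - alpha) * old + alpha * new
#   )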
class MinibatchSTDConcatLayer(nn.Module):
'''
'''
def __init__(self, averaging='all'):
super(MinibatchSTDConcatLayer, self).__init__()
self.averaging = averaging.lower()
if 'group' in self.averaging:
self.n = int(self.averaging[5:])
else:
            assert self.averaging in ['all', 'flat', 'spatial', 'none', 'gpool'], 'Invalid averaging mode %s' % self.averaging
self.adjusted_std = lambda x, **kwargs: torch.sqrt(torch.mean((x - torch.mean(x, **kwargs)) ** 2, **kwargs) + 1e-8)
def forward(self, x):
shape = list(x.size())
target_shape = copy.deepcopy(shape)
vals = self.adjusted_std(x, dim=0, keepdim=True)
if self.averaging == 'all':
target_shape[1] = 1
vals = torch.mean(vals, dim=1, keepdim=True)
elif self.averaging == 'spatial':
if len(shape) == 4:
                vals = torch.mean(torch.mean(vals, 2, keepdim=True), 3, keepdim=True)
elif self.averaging == 'none':
target_shape = [target_shape[0]] + [s for s in target_shape[1:]]
elif self.averaging == 'gpool':
if len(shape) == 4:
                vals = torch.mean(torch.mean(torch.mean(x, 2, keepdim=True), 3, keepdim=True), 0, keepdim=True)
elif self.averaging == 'flat':
target_shape[1] = 1
vals = torch.FloatTensor([self.adjusted_std(x)])
else: # self.averaging == 'group'
target_shape[1] = self.n
            vals = vals.view(self.n, shape[1] // self.n, shape[2], shape[3])
            vals = torch.mean(vals, 0, keepdim=True).view(1, self.n, 1, 1)
vals = vals.expand(*target_shape)
return torch.cat([x, vals], 1)
def __repr__(self):
return self.__class__.__name__ + '(averaging = {})'.format(self.averaging)
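# Shape note (descriptive, added for clarity): with the default averaging='all' the
# layer appends a single feature map holding the averaged minibatch standard
# deviation, so an input of shape [N, C, H, W] becomes [N, C + 1, H, W].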
class PixelwiseNormLayer(nn.Module):
'''
'''
def __init__(self):
super(PixelwiseNormLayer, self).__init__()
self.eps = 1e-8
def forward(self, x):
return x / (torch.mean(x ** 2, dim=1, keepdim=True) + self.eps) ** 0.5
class EqualizedConv2d(nn.Module):
'''Equalize the learning rate for convolotional layer
'''
def __init__(self, c_in, c_out, k_size, stride, pad, bias=False):
super(EqualizedConv2d, self).__init__()
self.conv = nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)
kaiming_normal(self.conv.weight, a=calculate_gain('conv2d'))
# Scaling the weights for equalized learning
conv_w = self.conv.weight.data.clone()
self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
self.scale = (torch.mean(self.conv.weight.data ** 2)) ** 0.5
self.conv.weight.data.copy_(self.conv.weight.data / self.scale) # for equalized learning rate
def forward(self, x):
x = self.conv(x.mul(self.scale.type(torch.cuda.FloatTensor)))
return x + self.bias.view(1, -1, 1, 1).expand_as(x)
class EqualizedDeconv2d(nn.Module):
'''Equalize the learning rate for transpose convolotional layer
'''
def __init__(self, c_in, c_out, k_size, stride, pad):
super(EqualizedDeconv2d, self).__init__()
self.deconv = nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False)
kaiming_normal(self.deconv.weight, a=calculate_gain('conv2d'))
# Scaling the weights for equalized learning
deconv_w = self.deconv.weight.data.clone()
self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
self.scale = (torch.mean(self.deconv.weight.data ** 2)) ** 0.5
self.deconv.weight.data.copy_(self.deconv.weight.data / self.scale)
def forward(self, x):
x = self.deconv(x.mul(self.scale))
return x + self.bias.view(1, -1, 1, 1).expand_as(x)
class EqualizedLinear(nn.Module):
'''Equalize the learning rate for linear layer
'''
def __init__(self, c_in, c_out):
super(EqualizedLinear, self).__init__()
self.linear = nn.Linear(c_in, c_out, bias=False)
kaiming_normal(self.linear.weight, a=calculate_gain('linear'))
# Scaling the weights for equalized learning
linear_w = self.linear.weight.data.clone()
self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
self.scale = (torch.mean(self.linear.weight.data ** 2)) ** 0.5
self.linear.weight.data.copy_(self.linear.weight.data / self.scale)
def forward(self, x):
x = self.linear(x.mul(self.scale.type(torch.cuda.FloatTensor)))
return x + self.bias.view(1, -1).expand_as(x)
class GeneralizedDropout(nn.Module):
'''
'''
def __init__(self, mode='mul', strength=0.4, axes=(0, 1), normalize=False):
super(GeneralizedDropout, self).__init__()
self.mode = mode.lower()
        assert self.mode in ['mul', 'drop', 'prop'], 'Invalid GeneralizedDropout mode %s' % mode
self.strength = strength
self.axes = [axes] if isinstance(axes, int) else list(axes)
self.normalize = normalize
self.gain = None
def forward(self, x, deterministic=False):
if deterministic or not self.strength:
return x
rnd_shape = [s if axis in self.axes else 1 for axis, s in enumerate(x.size())]
if self.mode == 'drop':
p = 1 - self.strength
            rnd = np.random.binomial(1, p=p, size=rnd_shape) / p
elif self.mode == 'mul':
rnd = (1 + self.strength) ** np.random.normal(size=rnd_shape)
else:
coef = self.strength * x.size(1) ** 0.5
            rnd = np.random.normal(size=rnd_shape) * coef + 1
        if self.normalize:
            rnd = rnd / np.linalg.norm(rnd, keepdims=True)
        rnd = Variable(torch.from_numpy(rnd).type(x.data.type()))
if x.is_cuda:
rnd = rnd.cuda()
return x * rnd
def __repr__(self):
param_str = '(mode = {0}, strength = {1}, axes = {2}, normalize = {3})'.format(self.mode, self.strength, self.axes, self.normalize)
return self.__class__.__name__ + param_str
| 41.266667 | 162 | 0.615136 |
ff12193c4fc78f685a62e48d5f9f3a15dddd930e | 355 | py | Python | ignition/dsl/flame/tensors/constants.py | IgnitionProject/ignition | 0eeb3a7878d828bc3c06d2cb2dd781e17776a8a6 | [
"BSD-2-Clause-FreeBSD"
] | 7 | 2015-01-25T18:15:48.000Z | 2022-03-09T17:39:12.000Z | ignition/dsl/flame/tensors/constants.py | IgnitionProject/ignition | 0eeb3a7878d828bc3c06d2cb2dd781e17776a8a6 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ignition/dsl/flame/tensors/constants.py | IgnitionProject/ignition | 0eeb3a7878d828bc3c06d2cb2dd781e17776a8a6 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """ Some useful constants """
from .tensor import Tensor
ZERO = Tensor('0', 2)
Zero = Tensor('0', 1)
zero = Tensor('0', 0)
I = Tensor('1', 2)
One = Tensor('1', 1)
one = Tensor('1', 0)
e = Tensor('e', 1)
CONSTANTS = set([ZERO, Zero, zero, I, One, one, e])
# Not constants but putting here anyways
A = Tensor("A", rank=2)
P_0 = Tensor("P_0", rank=2)
| 16.904762 | 51 | 0.597183 |
62ad687c06f25f9f76613fce22a14515e22d370c | 553 | py | Python | setup.py | OmenApps/django-fullclean | 1bc720728bf75b3dbdffcf1d12ada609944043e5 | [
"MIT"
] | 15 | 2016-09-21T22:40:30.000Z | 2021-12-22T21:47:08.000Z | setup.py | OmenApps/django-fullclean | 1bc720728bf75b3dbdffcf1d12ada609944043e5 | [
"MIT"
] | 5 | 2020-04-02T17:09:33.000Z | 2022-03-14T19:36:33.000Z | setup.py | OmenApps/django-fullclean | 1bc720728bf75b3dbdffcf1d12ada609944043e5 | [
"MIT"
] | 3 | 2018-01-07T15:52:44.000Z | 2021-05-30T14:53:45.000Z | from setuptools import setup
setup(
name='django-fullclean',
packages=['django_fullclean'],
version='0.0.5',
description='Force django model call full_clean before save.',
author='Alfred Huang',
author_email='57082212@qq.com',
url='https://github.com/fish-ball/django-fullclean',
download_url='https://github.com/fish-ball/django-fullclean/tarball/0.0.4',
keywords=['django', 'model', 'validator', 'full_clean'],
classifiers=[],
license='MIT License',
install_requires=['django'],
platforms='any',
)
| 30.722222 | 79 | 0.678119 |
2c6265432a4db4ba1a783c2f1cb7d86529c856db | 1,561 | py | Python | mhtportal/urls.py | 0xelectron/mhtportal-web | bd05069d6245e86d4ae887cacf33b04ef9476816 | [
"MIT"
] | null | null | null | mhtportal/urls.py | 0xelectron/mhtportal-web | bd05069d6245e86d4ae887cacf33b04ef9476816 | [
"MIT"
] | 5 | 2019-10-20T06:17:36.000Z | 2021-06-10T18:13:29.000Z | mhtportal/urls.py | 0xelectron/mhtportal-web | bd05069d6245e86d4ae887cacf33b04ef9476816 | [
"MIT"
] | 2 | 2019-05-11T17:25:25.000Z | 2019-10-12T17:59:47.000Z | """mhtportal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import (url,
include)
from django.contrib import admin
from rest_framework.schemas import get_schema_view
from rest_framework_jwt.views import (obtain_jwt_token,
refresh_jwt_token,
verify_jwt_token)
from base.views import MeView
schema_view = get_schema_view(title='Mht Portal APIs')
urlpatterns = [
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^me/', MeView.as_view(), name="me"),
url(r'^base/', include('base.urls', namespace='base')),
url(r'^events/', include('events.urls', namespace='events')),
url(r'^api-info/$', schema_view, name='api-info'),
url(r'^api-token-auth/', obtain_jwt_token),
url(r'^api-token-refresh/', refresh_jwt_token),
url(r'^api-token-verify/', verify_jwt_token),
]
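# Illustrative token requests (comments only; payload fields follow the standard
# rest_framework_jwt conventions, which is an assumption about this deployment):
#
#   POST /api-token-auth/    {"username": "...", "password": "..."}  -> {"token": "..."}
#   POST /api-token-verify/  {"token": "..."}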
| 40.025641 | 79 | 0.657912 |
cb9cb57d18db53cffd46ab34effa680c80c81039 | 1,083 | py | Python | feature_extraction.py | vkriznar/su-seminar | d89ea9639638a8bf4ae916606cc132950be6138c | [
"MIT"
] | null | null | null | feature_extraction.py | vkriznar/su-seminar | d89ea9639638a8bf4ae916606cc132950be6138c | [
"MIT"
] | null | null | null | feature_extraction.py | vkriznar/su-seminar | d89ea9639638a8bf4ae916606cc132950be6138c | [
"MIT"
] | null | null | null | import numpy as np
import cv2
def DCT_features(img_path):
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
imgcv = cv2.split(img)[0]
cv2.boxFilter(imgcv, 0, (7,7), imgcv, (-1,-1), False, cv2.BORDER_DEFAULT)
    imgcv = cv2.resize(imgcv, (32, 32))  # reassign: cv2.resize returns the resized array
imf = np.float32(imgcv) / 255.0
dct = cv2.dct(imf)
return np.uint8(dct * 255.0)
def compute_first_digits(img):
dct = cv2.dct(np.float32(img) / 255.0)
dct = np.abs(dct)
dct[dct == 0] = 1e-10
min_val = dct.min()
if min_val < 1:
dct = np.power(10, -np.floor(np.log10(min_val)) + 1) * dct
if not (dct >= 1.0).all():
raise ValueError("Error")
digits = np.log10(dct).astype(int).astype('float32')
first_digits = dct / np.power(10, digits)
first_digits[(first_digits < 1.0) & (first_digits > 0.9)] = 1
first_digits = first_digits.astype(int)
    if not ((first_digits >= 1).all() and (first_digits <= 9).all()):
        raise ValueError("First digits must lie in the range 1-9")
return first_digits
def compute_first_digits_counts(img):
first_digits = compute_first_digits(img)
unq, counts = np.unique(first_digits, return_counts=True)
return unq, counts
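# Illustrative usage (not part of the original module; the file name is a placeholder):
#
#   img = cv2.imread('photo.jpg', cv2.IMREAD_GRAYSCALE)
#   digits, counts = compute_first_digits_counts(img)   # Benford-style first-digit histogram
#
# Worked example of the digit extraction above: a (scaled) coefficient of 234.5 gives
# int(log10(234.5)) == 2 and 234.5 / 10**2 == 2.345, so its first significant digit is 2.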
| 25.186047 | 74 | 0.686057 |
4f346caa63c596339bb2a8173257ac6fc6ce85fe | 5,382 | py | Python | tests/test_report.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 8 | 2021-05-18T02:22:03.000Z | 2021-09-11T02:49:04.000Z | tests/test_report.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-04-26T04:38:35.000Z | 2021-04-26T04:38:35.000Z | tests/test_report.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-11-10T02:28:47.000Z | 2021-11-10T02:28:47.000Z | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n.reports.csvout import Formatter
from .common import BaseTest, load_data
class TestEC2Report(BaseTest):
def setUp(self):
data = load_data("report.json")
self.records = data["ec2"]["records"]
self.headers = data["ec2"]["headers"]
self.rows = data["ec2"]["rows"]
self.p = self.load_policy({"name": "report-test-ec2", "resource": "ec2"})
def test_default_csv(self):
self.patch(self.p.resource_manager.resource_type,
'default_report_fields', ())
formatter = Formatter(self.p.resource_manager.resource_type)
self.assertEqual(
formatter.to_csv([self.records['full']]),
[['InstanceId-1', '', 'LaunchTime-1']])
def test_csv(self):
p = self.load_policy({"name": "report-test-ec2", "resource": "ec2"})
formatter = Formatter(p.resource_manager.resource_type)
tests = [
(["full"], ["full"]),
(["minimal"], ["minimal"]),
(["full", "minimal"], ["full", "minimal"]),
(["full", "duplicate", "minimal"], ["full", "minimal"]),
]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
def test_custom_fields(self):
# Test the ability to include custom fields.
extra_fields = [
"custom_field=CustomField",
"missing_field=MissingField",
"custom_tag=tag:CustomTag",
]
# First do a test with adding custom fields to the normal ones
formatter = Formatter(
self.p.resource_manager.resource_type, extra_fields=extra_fields
)
recs = [self.records["full"]]
rows = [self.rows["full_custom"]]
self.assertEqual(formatter.to_csv(recs), rows)
# Then do a test with only having custom fields
formatter = Formatter(
self.p.resource_manager.resource_type,
extra_fields=extra_fields,
include_default_fields=False,
)
recs = [self.records["full"]]
rows = [self.rows["minimal_custom"]]
self.assertEqual(formatter.to_csv(recs), rows)
class TestASGReport(BaseTest):
def setUp(self):
data = load_data("report.json")
self.records = data["asg"]["records"]
self.headers = data["asg"]["headers"]
self.rows = data["asg"]["rows"]
def test_csv(self):
p = self.load_policy({"name": "report-test-asg", "resource": "asg"})
formatter = Formatter(p.resource_manager.resource_type)
tests = [
(["full"], ["full"]),
(["minimal"], ["minimal"]),
(["full", "minimal"], ["full", "minimal"]),
(["full", "duplicate", "minimal"], ["full", "minimal"]),
]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
class TestELBReport(BaseTest):
def setUp(self):
data = load_data("report.json")
self.records = data["elb"]["records"]
self.headers = data["elb"]["headers"]
self.rows = data["elb"]["rows"]
def test_csv(self):
p = self.load_policy({"name": "report-test-elb", "resource": "elb"})
formatter = Formatter(p.resource_manager.resource_type)
tests = [
(["full"], ["full"]),
(["minimal"], ["minimal"]),
(["full", "minimal"], ["full", "minimal"]),
(["full", "duplicate", "minimal"], ["full", "minimal"]),
]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
class TestMultiReport(BaseTest):
def setUp(self):
data = load_data("report.json")
self.records = data["ec2"]["records"]
self.headers = data["ec2"]["headers"]
self.rows = data["ec2"]["rows"]
def test_csv(self):
# Test the extra headers for multi-policy
p = self.load_policy({"name": "report-test-ec2", "resource": "ec2"})
formatter = Formatter(
p.resource_manager.resource_type,
include_region=True,
include_policy=True,
)
tests = [(["minimal"], ["minimal_multipolicy"])]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
| 37.636364 | 81 | 0.588629 |
622351bb6d145f3f72656fcdcb87e8329bdc7a92 | 13,464 | py | Python | oauth2/test/functional/test_authorization_code.py | darkanthey/oauth2-stateless | fea3c0a3eca4bf4874f16dcabf2a1b3e9c80cfb0 | [
"MIT"
] | 19 | 2018-01-19T10:45:57.000Z | 2021-12-28T11:15:46.000Z | oauth2/test/functional/test_authorization_code.py | darkanthey/oauth2-stateless | fea3c0a3eca4bf4874f16dcabf2a1b3e9c80cfb0 | [
"MIT"
] | 2 | 2019-04-07T09:55:36.000Z | 2020-11-24T09:30:08.000Z | oauth2/test/functional/test_authorization_code.py | darkanthey/oauth2-stateless | fea3c0a3eca4bf4874f16dcabf2a1b3e9c80cfb0 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import sys
from multiprocessing import Queue
from wsgiref.simple_server import make_server
from flask import Flask
from oauth2 import Provider
from oauth2.compatibility import json, parse_qs, urlencode
from oauth2.grant import AuthorizationCodeGrant, RefreshToken
from oauth2.test import unittest
from oauth2.test.functional import NoLoggingHandler, store_factory
from oauth2.tokengenerator import Uuid4TokenGenerator
from oauth2.web import AuthorizationCodeGrantSiteAdapter
from oauth2.web.flask import oauth_request_hook
from oauth2.web.tornado import OAuth2Handler
from oauth2.web.wsgi import Application
from tornado.ioloop import IOLoop
from tornado.web import Application as TornadoApplication
from tornado.web import url
from multiprocessing import Process
from urllib.request import urlopen
from urllib.error import HTTPError
def create_provider(site_adapter):
redirect_uri = "http://127.0.0.1:15487/callback"
stores = store_factory(client_identifier="abc", client_secret="xyz", redirect_uris=[redirect_uri])
provider = Provider(access_token_store=stores["access_token_store"],
auth_code_store=stores["auth_code_store"],
client_store=stores["client_store"],
token_generator=Uuid4TokenGenerator())
provider.add_grant(AuthorizationCodeGrant(expires_in=120, site_adapter=site_adapter))
provider.add_grant(RefreshToken(expires_in=60))
return provider
def run_client(queue):
try:
app = ClientApplication(
callback_url="http://127.0.0.1:15487/callback",
client_id="abc",
client_secret="xyz",
provider_url="http://127.0.0.1:15486")
httpd = make_server('', 15487, app, handler_class=NoLoggingHandler)
queue.put({"result": 0})
httpd.serve_forever()
access_token_result = urlopen("http://127.0.0.1:15487/app").read()
except Exception as e:
queue.put({"result": 1, "error_message": str(e)})
class AuthorizationCodeTestCase(unittest.TestCase):
def setUp(self):
self.client = None
self.server = None
def test_tornado(self):
def run_provider(queue):
try:
provider = create_provider(TestSiteAdapter())
app = TornadoApplication([
url(r"/authorize", OAuth2Handler, dict(provider=provider)),
url(r"/token", OAuth2Handler, dict(provider=provider))
], debug=True)
app.listen(15486)
queue.put({"result": 0})
IOLoop.current().start()
except Exception as e:
queue.put({"result": 1, "error_message": str(e)})
ready_queue = Queue()
self.server = Process(target=run_provider, args=(ready_queue,))
self.server.start()
provider_started = ready_queue.get()
if provider_started["result"] != 0:
raise Exception("Error starting Provider process with message"
"'{0}'".format(provider_started["error_message"]))
self.client = Process(target=run_client, args=(ready_queue,))
self.client.start()
client_started = ready_queue.get()
if client_started["result"] != 0:
raise Exception("Error starting Client Application process with "
"message '{0}'".format(client_started["error_message"]))
self.access_token()
def test_flask(self):
def run_provider(queue):
try:
site_adapter = TestSiteAdapter()
provider = create_provider(site_adapter)
app = Flask(__name__)
flask_hook = oauth_request_hook(provider)
app.add_url_rule('/authorize', 'authorize', view_func=flask_hook(site_adapter.authenticate),
methods=['GET', 'POST'])
app.add_url_rule('/token', 'token', view_func=flask_hook(site_adapter.token), methods=['POST'])
queue.put({"result": 0})
app.run(host='0.0.0.0', port=15486)
except Exception as e:
queue.put({"result": 1, "error_message": str(e)})
ready_queue = Queue()
self.server = Process(target=run_provider, args=(ready_queue,))
self.server.start()
provider_started = ready_queue.get()
if provider_started["result"] != 0:
raise Exception("Error starting Provider process with "
"message '{0}'".format(provider_started["error_message"]))
self.client = Process(target=run_client, args=(ready_queue,))
self.client.start()
client_started = ready_queue.get()
if client_started["result"] != 0:
raise Exception("Error starting Client Application process with "
"message '{0}'".format(client_started["error_message"]))
self.access_token()
def test_aiohttp(self):
        # aiohttp requires at least Python 3.5.3;
        # otherwise skip this test
if sys.version_info < (3, 5, 3):
return
import aiohttp.web
from oauth2.web.aiohttp import OAuth2Handler
def run_provider(queue):
try:
site_adapter = TestSiteAdapter()
provider = create_provider(site_adapter)
app = aiohttp.web.Application()
handler = OAuth2Handler(provider)
app.router.add_get(provider.authorize_path,
handler.dispatch_request)
app.router.add_post(provider.authorize_path,
handler.post_dispatch_request)
app.router.add_post(provider.token_path,
handler.post_dispatch_request)
queue.put({"result": 0})
aiohttp.web.run_app(app, host='127.0.0.1', port=15486)
except Exception as e:
queue.put({"result": 1, "error_message": str(e)})
ready_queue = Queue()
self.server = Process(target=run_provider, args=(ready_queue,))
self.server.start()
provider_started = ready_queue.get()
if provider_started["result"] != 0:
raise Exception("Error starting Provider process with "
"message '{0}'".format(provider_started["error_message"]))
self.client = Process(target=run_client, args=(ready_queue,))
self.client.start()
client_started = ready_queue.get()
if client_started["result"] != 0:
raise Exception("Error starting Client Application process with "
"message '{0}'".format(client_started["error_message"]))
self.access_token()
def test_wsgi(self):
def run_provider(queue):
try:
provider = create_provider(TestSiteAdapter())
app = Application(provider=provider)
httpd = make_server('', 15486, app, handler_class=NoLoggingHandler)
queue.put({"result": 0})
httpd.serve_forever()
except Exception as e:
queue.put({"result": 1, "error_message": str(e)})
ready_queue = Queue()
self.server = Process(target=run_provider, args=(ready_queue,))
self.server.start()
provider_started = ready_queue.get()
if provider_started["result"] != 0:
raise Exception("Error starting Provider process with message"
"'{0}'".format(provider_started["error_message"]))
self.client = Process(target=run_client, args=(ready_queue,))
self.client.start()
client_started = ready_queue.get()
if client_started["result"] != 0:
raise Exception("Error starting Client Application process with "
"message '{0}'".format(client_started["error_message"]))
self.access_token()
def test_wsgi_404(self):
def run_provider(queue):
try:
provider = create_provider(TestSiteAdapter())
app = Application(provider=provider)
httpd = make_server('', 15486, app, handler_class=NoLoggingHandler)
queue.put({"result": 0})
httpd.serve_forever()
except Exception as e:
queue.put({"result": 1, "error_message": str(e)})
ready_queue = Queue()
self.server = Process(target=run_provider, args=(ready_queue,))
self.server.start()
provider_started = ready_queue.get()
if provider_started["result"] != 0:
raise Exception("Error starting Provider process with message"
"'{0}'".format(provider_started["error_message"]))
try:
urlopen("http://127.0.0.1:15486/invalid-path").read()
except HTTPError as e:
self.assertEqual(404, e.code)
def access_token(self):
uuid_regex = "^[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}-[a-z0-9]{12}$"
try:
access_token_result = urlopen("http://127.0.0.1:15487/app").read()
except HTTPError as e:
print(e.read())
exit(1)
access_token_data = json.loads(access_token_result.decode('utf-8'))
self.assertEqual(access_token_data["token_type"], "Bearer")
self.assertEqual(access_token_data["expires_in"], 120)
self.assertRegex(access_token_data["access_token"], uuid_regex)
self.assertRegex(access_token_data["refresh_token"], uuid_regex)
request_data = {"grant_type": "refresh_token",
"refresh_token": access_token_data["refresh_token"],
"client_id": "abc",
"client_secret": "xyz"}
refresh_token_result = urlopen("http://127.0.0.1:15486/token", urlencode(request_data).encode('utf-8'))
refresh_token_data = json.loads(refresh_token_result.read().decode('utf-8'))
self.assertEqual(refresh_token_data["token_type"], "Bearer")
self.assertEqual(refresh_token_data["expires_in"], 120)
self.assertRegex(refresh_token_data["access_token"], uuid_regex)
def tearDown(self):
if self.client is not None:
self.client.terminate()
self.client.join()
if self.server is not None:
self.server.terminate()
self.server.join()
class TestSiteAdapter(AuthorizationCodeGrantSiteAdapter):
def authenticate(self, request, environ, scopes, client):
return {"additional": "data"}, 1
def token(self):
pass
def user_has_denied_access(self, request):
return False
class ClientApplication(object):
def __init__(self, callback_url, client_id, client_secret, provider_url):
self.callback_url = callback_url
self.client_id = client_id
self.client_secret = client_secret
self.api_server_url = provider_url
self.access_token_result = None
self.auth_token = None
self.token_type = ""
def __call__(self, env, start_response):
try:
if env["PATH_INFO"] == "/app":
status, body, headers = self._serve_application()
elif env["PATH_INFO"] == "/callback":
status, body, headers = self._read_auth_token(env)
else:
status = "301 Moved"
body = ""
headers = {"Location": "/app"}
except HTTPError as http_error:
print("HTTPError occured:")
print(http_error.read())
raise
start_response(status, [(header, val) for header, val in headers.items()])
# Be careful with body in PY2 that can be str but in PY3 that should be binary
return [body.encode('utf-8')]
def _request_access_token(self):
post_params = {"client_id": self.client_id,
"client_secret": self.client_secret,
"code": self.auth_token,
"grant_type": "authorization_code",
"redirect_uri": self.callback_url}
token_endpoint = self.api_server_url + "/token"
token_result = urlopen(token_endpoint, urlencode(post_params).encode('utf-8'))
result = json.loads(token_result.read().decode('utf-8'))
self.access_token_result = result
return "302 Found", "", {"Location": "/app"}
def _read_auth_token(self, env):
query_params = parse_qs(env["QUERY_STRING"])
self.auth_token = query_params["code"][0]
return "302 Found", "", {"Location": "/app"}
def _request_auth_token(self):
auth_endpoint = self.api_server_url + "/authorize"
query = urlencode({"client_id": "abc", "redirect_uri": self.callback_url, "response_type": "code"})
location = "%s?%s" % (auth_endpoint, query)
return "302 Found", "", {"Location": location}
def _serve_application(self):
if self.access_token_result is None:
if self.auth_token is None:
return self._request_auth_token()
return self._request_access_token()
# We encode body to binary in ClientApplication
return "200 OK", json.dumps(self.access_token_result), {}
| 35.525066 | 111 | 0.603832 |
4af37cb655427d95fa698da6b8a7a8b0c2b1c2dc | 6,306 | py | Python | modules/simulation/individual/demand.py | kfoerderer/ANN-based-surrogates | aef0eca9e969858e47babfc73a15c04262285e6b | [
"MIT"
] | null | null | null | modules/simulation/individual/demand.py | kfoerderer/ANN-based-surrogates | aef0eca9e969858e47babfc73a15c04262285e6b | [
"MIT"
] | null | null | null | modules/simulation/individual/demand.py | kfoerderer/ANN-based-surrogates | aef0eca9e969858e47babfc73a15c04262285e6b | [
"MIT"
] | null | null | null | from modules.simulation.simulationmodel import SimulationModel
from typing import List, Tuple
import numpy as np
class Demand(SimulationModel):
"""
Demand (passive, no actions).
Please note: The HWT does NOT enforce the min and max temp boundaries.
State = Forecast
- demand ``float`` demand for current time step in W
Note that transition() outputs the demand of the next period, remove this value by using the state normalization scalar
Arguments
- dt ``int`` delta time in seconds
- demand_series ``np.ndaray`` an array holding one or multiple demand series in W. Shape [number of series, series length]
- window_width ``int`` specifies the length of the range from which the values are sampled
- demand_type ``np.ndarray`` vector to map demand onto the interaction
- seconds_per_value ``int`` length of a time slot of the demand_series
"""
def __init__(self, dt: int, demand_series: np.ndarray, demand_type: np.ndarray=np.array([0,1]), seconds_per_value: int=60):
# passive system, do not call the constructor of super()
super().__init__(dt, None, False)
self.demand_series = demand_series
self.window_width = int(dt / seconds_per_value)
assert self.window_width > 0
self.demand_type = demand_type
self.state = np.array([0])
self.hidden_state = np.array([0, 0, np.empty([0,1])])
self.possible_window_positions = np.arange(demand_series.shape[1] / self.window_width, dtype=int) * self.window_width
def __repr__(self):
return 'demand(state={}, hidden_state={})'.format(self.state, self.hidden_state)
@property
def demand(self):
return self.state[0]
@demand.setter
def demand(self, v: int):
self.state[0] = v
@property
def series_idx(self) -> int:
return self.hidden_state[0]
@series_idx.setter
def series_idx(self, v: int):
self.hidden_state[0] = v
@property
def window_position(self) -> int:
return self.hidden_state[1]
@window_position.setter
def window_position(self, v: int):
self.hidden_state[1] = v
@property
def forecast_series(self) -> np.ndarray:
return self.hidden_state[2]
@forecast_series.setter
def forecast_series(self, v: np.ndarray):
self.hidden_state[2] = v
def determine_feasible_actions(self) -> np.ndarray:
"""
Passive element, thus no actions.
"""
return None
def sample_state(self, window_position: int=-1, demand_interval: Tuple[int,int]=None, **kwargs) -> np.ndarray:
"""
Sample the current demand
Arguments
        - window_position ``int`` defines the position of the sampling window. The parameter specifies the lower end of the sampling range. Use a value less than 0 to use a random window position.
- demand_interval ``(int,int)`` during training this interval can be used to draw values from
"""
# 1. sample a state & 2. update the state to the newly sampled state
if window_position < 0:
window_position = np.random.choice(self.possible_window_positions)
self.window_position = window_position
self.series_idx = np.random.choice(self.demand_series.shape[0]) # pick a random series
if self._training and (demand_interval is not None):
self.state = np.array([np.random.uniform(demand_interval[0], demand_interval[1])])
elif self._training:
max_demand = np.max(self.demand_series)
self.state = np.array([np.random.choice([0, max_demand, np.random.uniform(0, max_demand)], p=[0.1,0.01,0.89])])
else:
self.state = np.array([np.random.choice(self.demand_series[self.series_idx][window_position:(window_position+self.window_width)])])
self.forecast_series = np.empty([0,1])
# 4. return the newly sampled state
return self.state
def forecast(self, time_step_count: int=1, **kwargs) -> np.ndarray:
forecast = self.forecast_series
if forecast.shape[0] < time_step_count-1:
# more data is needed
window_position = self.window_position
window_width = self.window_width
demand_series = self.demand_series
series_idx = self.series_idx
for step in range(forecast.shape[0], time_step_count-1):
# move window
window_position += window_width
window_position %= demand_series.shape[1]
# create forecast
new_state = np.random.choice(demand_series[series_idx][window_position:(window_position+window_width)])
forecast = np.append(forecast, [[new_state]], axis=0)
self.window_position = window_position
self.forecast_series = forecast
return np.concatenate((self.state, self.forecast_series[:time_step_count-1,0]), axis=0).reshape(-1,1), np.ones(time_step_count, dtype=bool).reshape(-1,1)
def transition(self, action=0, interaction: np.ndarray=np.array([0,0])) -> Tuple[np.ndarray, np.ndarray]:
"""
Updates the system's state according to the provided parameters and returns the new state.
Arguments
- action ``float``: action to take. May be ``None`` for passive systems.
- interaction ``np.ndarray``: possible interactions
"""
demand = self.state
forecast = self.forecast_series
if forecast.shape[0] > 0:
# there are forecasted values
# set next state according to forecast
self.state = np.copy(forecast[0])
# remove the old element
self.forecast_series = np.delete(forecast, 0).reshape(-1,1)
else:
# move window
window_position = (self.window_position + self.window_width) % self.demand_series.shape[1]
self.window_position = window_position
self.state = np.array([np.random.choice(self.demand_series[self.series_idx][window_position:(window_position+self.window_width)])])
return self.state, (interaction - demand * self.demand_type)
| 40.683871 | 196 | 0.641611 |
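A brief usage sketch for the Demand model above. The demand series values and the 15-minute step are illustrative assumptions; the import path is taken from the record header, and the surrounding simulation framework is assumed to initialise parent-class state such as the training flag.

# Sketch only: exercising the passive Demand model with made-up load data.
import numpy as np
from modules.simulation.individual.demand import Demand  # path from the record above

series = np.array([[300, 320, 280, 500, 450, 300, 310, 290]])  # W, one series
demand = Demand(dt=900, demand_series=series, seconds_per_value=900)

demand.sample_state()                                # draw an initial demand in W
forecast, mask = demand.forecast(time_step_count=4)  # both shaped (4, 1)
state, interaction = demand.transition(interaction=np.array([0, 0]))
print(forecast.ravel(), state, interaction)          # forecast demands, new state, net power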
7df4b597c4873f59ed71c3e8b6a167c2847a380a | 1,272 | py | Python | aquavalet/app.py | Johnetordoff/aquavalet | 31af9321180191cb46d44da5df0d8580c0044df2 | ["Apache-2.0"] | 1 | 2018-03-26T19:20:28.000Z | 2018-03-26T19:20:28.000Z | aquavalet/app.py | Johnetordoff/aquavalet | 31af9321180191cb46d44da5df0d8580c0044df2 | ["Apache-2.0"] | null | null | null | aquavalet/app.py | Johnetordoff/aquavalet | 31af9321180191cb46d44da5df0d8580c0044df2 | ["Apache-2.0"] | null | null | null |
import json
from aiohttp import web
import logging
from aquavalet.server.routes import routes
logger = logging.getLogger(__name__)
def json_error(status_code: int, exception: Exception) -> web.Response:
"""
Returns a Response from an exception.
Used for error middleware.
:param status_code:
:param exception:
:return:
"""
json_body = json.dumps(
{"error": exception.__class__.__name__, "message": exception.message}
)
return web.json_response(status=status_code, body=json_body)
async def error_middleware(app: web.Application, handler):
"""
This middleware handles exceptions received from views or from previous middleware.
:param app:
:param handler:
:return:
"""
async def middleware_handler(request):
try:
return await handler(request)
except web.HTTPException as ex:
return json_error(ex.status, ex)
except Exception as e:
logger.warning(
"Request {} has failed with exception: {}".format(request, repr(e))
)
return json_error(500, e)
return middleware_handler
def app():
app = web.Application(middlewares=[error_middleware])
app.add_routes(routes)
return app
| 24 | 87 | 0.658805 |
9004ea8fd9be2e91fa212db3ceaf5b31d6d32dd1 | 265 | py | Python | tests/conftest.py | trulede/mashumaro | cdcd8c0f212124eaf58873300e45fed6c8683935 | ["Apache-2.0"] | null | null | null | tests/conftest.py | trulede/mashumaro | cdcd8c0f212124eaf58873300e45fed6c8683935 | ["Apache-2.0"] | null | null | null | tests/conftest.py | trulede/mashumaro | cdcd8c0f212124eaf58873300e45fed6c8683935 | ["Apache-2.0"] | null | null | null |
from unittest.mock import patch
from mashumaro.core.const import PY_37_MIN
if not PY_37_MIN:
collect_ignore = ["test_pep_563.py"]
fake_add_from_dict = patch(
"mashumaro.core.meta.builder." "CodeBuilder.add_from_dict",
lambda *args, **kwargs: ...,
)
| 20.384615 | 63 | 0.728302 |
327ec8f5ab0f29b684e6f08f28bf574bad502167 | 11,794 | py | Python | isi_sdk/models/reports_rid_subreports_subreport_policy.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | ["MIT"] | null | null | null | isi_sdk/models/reports_rid_subreports_subreport_policy.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | ["MIT"] | null | null | null | isi_sdk/models/reports_rid_subreports_subreport_policy.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | ["MIT"] | null | null | null |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ReportsRidSubreportsSubreportPolicy(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ReportsRidSubreportsSubreportPolicy - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'action': 'str',
'file_matching_pattern': 'ReportsRidSubreportsSubreportPolicyFileMatchingPattern',
'name': 'str',
'source_exclude_directories': 'list[str]',
'source_include_directories': 'list[str]',
'source_root_path': 'str',
'target_host': 'str',
'target_path': 'str'
}
self.attribute_map = {
'action': 'action',
'file_matching_pattern': 'file_matching_pattern',
'name': 'name',
'source_exclude_directories': 'source_exclude_directories',
'source_include_directories': 'source_include_directories',
'source_root_path': 'source_root_path',
'target_host': 'target_host',
'target_path': 'target_path'
}
self._action = None
self._file_matching_pattern = None
self._name = None
self._source_exclude_directories = None
self._source_include_directories = None
self._source_root_path = None
self._target_host = None
self._target_path = None
@property
def action(self):
"""
Gets the action of this ReportsRidSubreportsSubreportPolicy.
If 'copy', source files will be copied to the target cluster. If 'sync', the target directory will be made an image of the source directory: Files and directories that have been deleted on the source, have been moved within the target directory, or no longer match the selection criteria will be deleted from the target directory.
:return: The action of this ReportsRidSubreportsSubreportPolicy.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""
Sets the action of this ReportsRidSubreportsSubreportPolicy.
If 'copy', source files will be copied to the target cluster. If 'sync', the target directory will be made an image of the source directory: Files and directories that have been deleted on the source, have been moved within the target directory, or no longer match the selection criteria will be deleted from the target directory.
:param action: The action of this ReportsRidSubreportsSubreportPolicy.
:type: str
"""
allowed_values = ["copy", "sync"]
if action not in allowed_values:
raise ValueError(
"Invalid value for `action`, must be one of {0}"
.format(allowed_values)
)
self._action = action
@property
def file_matching_pattern(self):
"""
Gets the file_matching_pattern of this ReportsRidSubreportsSubreportPolicy.
A file matching pattern, organized as an OR'ed set of AND'ed file criteria, for example ((a AND b) OR (x AND y)) used to define a set of files with specific properties. Policies of type 'sync' cannot use 'path' or time criteria in their matching patterns, but policies of type 'copy' can use all listed criteria.
:return: The file_matching_pattern of this ReportsRidSubreportsSubreportPolicy.
:rtype: ReportsRidSubreportsSubreportPolicyFileMatchingPattern
"""
return self._file_matching_pattern
@file_matching_pattern.setter
def file_matching_pattern(self, file_matching_pattern):
"""
Sets the file_matching_pattern of this ReportsRidSubreportsSubreportPolicy.
A file matching pattern, organized as an OR'ed set of AND'ed file criteria, for example ((a AND b) OR (x AND y)) used to define a set of files with specific properties. Policies of type 'sync' cannot use 'path' or time criteria in their matching patterns, but policies of type 'copy' can use all listed criteria.
:param file_matching_pattern: The file_matching_pattern of this ReportsRidSubreportsSubreportPolicy.
:type: ReportsRidSubreportsSubreportPolicyFileMatchingPattern
"""
self._file_matching_pattern = file_matching_pattern
@property
def name(self):
"""
Gets the name of this ReportsRidSubreportsSubreportPolicy.
User-assigned name of this sync policy.
:return: The name of this ReportsRidSubreportsSubreportPolicy.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ReportsRidSubreportsSubreportPolicy.
User-assigned name of this sync policy.
:param name: The name of this ReportsRidSubreportsSubreportPolicy.
:type: str
"""
self._name = name
@property
def source_exclude_directories(self):
"""
Gets the source_exclude_directories of this ReportsRidSubreportsSubreportPolicy.
Directories that will be excluded from the sync. Modifying this field will result in a full synchronization of all data.
:return: The source_exclude_directories of this ReportsRidSubreportsSubreportPolicy.
:rtype: list[str]
"""
return self._source_exclude_directories
@source_exclude_directories.setter
def source_exclude_directories(self, source_exclude_directories):
"""
Sets the source_exclude_directories of this ReportsRidSubreportsSubreportPolicy.
Directories that will be excluded from the sync. Modifying this field will result in a full synchronization of all data.
:param source_exclude_directories: The source_exclude_directories of this ReportsRidSubreportsSubreportPolicy.
:type: list[str]
"""
self._source_exclude_directories = source_exclude_directories
@property
def source_include_directories(self):
"""
Gets the source_include_directories of this ReportsRidSubreportsSubreportPolicy.
Directories that will be included in the sync. Modifying this field will result in a full synchronization of all data.
:return: The source_include_directories of this ReportsRidSubreportsSubreportPolicy.
:rtype: list[str]
"""
return self._source_include_directories
@source_include_directories.setter
def source_include_directories(self, source_include_directories):
"""
Sets the source_include_directories of this ReportsRidSubreportsSubreportPolicy.
Directories that will be included in the sync. Modifying this field will result in a full synchronization of all data.
:param source_include_directories: The source_include_directories of this ReportsRidSubreportsSubreportPolicy.
:type: list[str]
"""
self._source_include_directories = source_include_directories
@property
def source_root_path(self):
"""
Gets the source_root_path of this ReportsRidSubreportsSubreportPolicy.
The root directory on the source cluster the files will be synced from. Modifying this field will result in a full synchronization of all data.
:return: The source_root_path of this ReportsRidSubreportsSubreportPolicy.
:rtype: str
"""
return self._source_root_path
@source_root_path.setter
def source_root_path(self, source_root_path):
"""
Sets the source_root_path of this ReportsRidSubreportsSubreportPolicy.
The root directory on the source cluster the files will be synced from. Modifying this field will result in a full synchronization of all data.
:param source_root_path: The source_root_path of this ReportsRidSubreportsSubreportPolicy.
:type: str
"""
self._source_root_path = source_root_path
@property
def target_host(self):
"""
Gets the target_host of this ReportsRidSubreportsSubreportPolicy.
Hostname or IP address of sync target cluster. Modifying the target cluster host can result in the policy being unrunnable if the new target does not match the current target association.
:return: The target_host of this ReportsRidSubreportsSubreportPolicy.
:rtype: str
"""
return self._target_host
@target_host.setter
def target_host(self, target_host):
"""
Sets the target_host of this ReportsRidSubreportsSubreportPolicy.
Hostname or IP address of sync target cluster. Modifying the target cluster host can result in the policy being unrunnable if the new target does not match the current target association.
:param target_host: The target_host of this ReportsRidSubreportsSubreportPolicy.
:type: str
"""
self._target_host = target_host
@property
def target_path(self):
"""
Gets the target_path of this ReportsRidSubreportsSubreportPolicy.
Absolute filesystem path on the target cluster for the sync destination.
:return: The target_path of this ReportsRidSubreportsSubreportPolicy.
:rtype: str
"""
return self._target_path
@target_path.setter
def target_path(self, target_path):
"""
Sets the target_path of this ReportsRidSubreportsSubreportPolicy.
Absolute filesystem path on the target cluster for the sync destination.
:param target_path: The target_path of this ReportsRidSubreportsSubreportPolicy.
:type: str
"""
self._target_path = target_path
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 39.844595 | 340 | 0.673732 |
b35e2b659dd1d277b6e61cbedd357759e01fd599 | 449 | py | Python | examples/animation/old_animation/gtk_timeout.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | ["MIT", "PSF-2.0", "BSD-3-Clause"] | 3 | 2015-11-16T07:22:28.000Z | 2016-11-11T17:55:14.000Z | examples/animation/old_animation/gtk_timeout.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | ["MIT", "PSF-2.0", "BSD-3-Clause"] | null | null | null | examples/animation/old_animation/gtk_timeout.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | ["MIT", "PSF-2.0", "BSD-3-Clause"] | 3 | 2017-05-31T01:42:22.000Z | 2020-06-23T13:57:49.000Z |
import gobject
import numpy as np
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
line, = ax.plot(np.random.rand(10))
ax.set_ylim(0, 1)
def update():
line.set_ydata(np.random.rand(10))
fig.canvas.draw_idle()
return True # return False to terminate the updates
gobject.timeout_add(100, update) # you can also use idle_add to update when gtk is idle
plt.show()
| 22.45 | 88 | 0.732739 |
837d2b4e17afff45ed9e4751f7407389e0c86235 | 1,329 | py | Python | src/executeScript.py | pannxe/ogogi | f1cd7d5c4d0eb0db13d2a71f4c1fc41bf1c6ee66 | ["MIT"] | null | null | null | src/executeScript.py | pannxe/ogogi | f1cd7d5c4d0eb0db13d2a71f4c1fc41bf1c6ee66 | ["MIT"] | null | null | null | src/executeScript.py | pannxe/ogogi | f1cd7d5c4d0eb0db13d2a71f4c1fc41bf1c6ee66 | ["MIT"] | null | null | null |
import subprocess
import os
import signal
import config
import time
import abb
import fileIO
# Execute the subject
def execute(
language, userID, probName, probID, atCase, timeLimit, memLimit, uploadTime
):
exeName = probID + "_" + uploadTime
IORedirect = (
"<source/" + probName + "/" + atCase + ".in 1>env/output.txt 2>env/error.txt"
)
cmd = (
"ulimit -v "
+ str(memLimit)
+ ";"
+ config.lang[language]["execute"]
+ "; exit;"
)
_replaces = [("[binName]", exeName), ("[IORedirect]", IORedirect)]
for ph, rep in _replaces:
cmd = cmd.replace(ph, rep)
# Why do we have to chmod this?
# os.system('chmod 777 .')
if os.path.exists("env/error.txt"):
os.system("chmod 777 env/error.txt")
if os.path.exists("env/output.txt"):
os.system("chmod 777 env/output.txt")
startTime = time.time()
proc = subprocess.Popen([cmd], shell=True, preexec_fn=os.setsid)
try:
proc.communicate(timeout=timeLimit)
t = proc.returncode
except subprocess.TimeoutExpired:
t = abb.ESC["TLE"]
elapsedTime = time.time() - startTime
os.system("chmod 777 .")
if os.path.exists("/proc/" + str(proc.pid)):
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
return t, elapsedTime
| 26.058824 | 85 | 0.604966 |