import requests
import json
import logging
from .api_client import APIClient
from ..exceptions.UnauthorizedException import UnauthorizedException
from requests_toolbelt.multipart.encoder import MultipartEncoder
# Child class of APIClient --> extends its error handling functionality.
# MessageClient provides one method for each messaging endpoint
# on the REST API.
class MessageClient(APIClient):
def __init__(self, bot_client):
self.bot_client = bot_client
self.config = self.bot_client.get_sym_config()
if self.config.data['proxyURL']:
self.proxies = {
"https": 'https://' + self.config.data['proxyURL'] + ':'
+ str(self.config.data['proxyPort'])
}
else:
self.proxies = {}
def get_msg_from_stream(self, stream_id, since):
logging.debug('MessageClient/getMessages()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token()
}
url = self.config.data['agentHost'] + \
'/agent/v4/stream/{0}/message?since={1}'.format(stream_id, since)
response = requests.get(url, headers=headers, proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.get_msg_from_stream(stream_id, since)
def send_msg(self, stream_id, outbound_msg):
print(stream_id + " streamID to play with")
logging.debug('MessageClient/createMessage()')
url = self.config.data['agentHost'] + \
'/agent/v4/stream/{0}/message/create'.format(stream_id)
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token()
}
response = requests.post(url, files=outbound_msg,
headers=headers, proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.send_msg(stream_id, outbound_msg)
def send_msg_with_attachment(self, stream_id, msg,
filename, path_to_attachment):
url = self.config.data['agentHost'] + \
'/agent/v4/stream/{0}/message/create'.format(stream_id)
logging.debug('MessageClient/send_msg_with_attachment() --> %s', url)
data = MultipartEncoder(
fields={'message': msg,
'attachment': (
filename, open(path_to_attachment, 'rb'), 'file')}
)
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token(),
'Content-Type': data.content_type
}
response = requests.post(url, data=data, headers=headers,
proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.send_msg_with_attachment(stream_id, msg, filename, path_to_attachment)
def get_msg_attachments(self, stream_id, msg_id, file_id):
logging.debug('MessageClient/getAttachments()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token()
}
url = self.config.data['agentHost'] + \
'/agent/v1/stream/{0}/attachment?msg_id={1}&file_id={2}'\
.format(stream_id, msg_id, file_id)
response = requests.get(url, headers=headers, proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.get_msg_attachments(stream_id, msg_id, file_id)
# Should go on the admin client --> contains sample data for example's sake only
def import_message(self):
logging.debug('MessageClient/import_message()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token()
}
url = self.config.data['agentHost']+'/agent/v4/message/import'
payload = {
"message": "<messageML>Imported message</messageML>",
"format": "MESSAGEML",
"intendedMessageTimestamp": 1433045622000,
"intendedMessageFromUserId": 7215545057281,
"originatingSystemId": "",
"originalMessageId": "",
"streamId": ""
}
response = requests.post(
url, headers=headers,
data=payload, proxies=self.proxies
)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.import_message()
# Should go on the admin client
def suppress_message(self, id):
logging.debug('MessageClient/suppress_message()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token()
}
url = self.config.data['podHost'] + \
'/pod/v1/admin/messagesuppression/{0}/suppress'.format(id)
response = requests.post(url, headers=headers, proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.suppress_message(id)
def post_msg_search(self):
logging.debug('MessageClient/post_msg_search()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token()
}
url = self.config.data['agentHost']+'/agent/v1/message/search'
payload = {
'hashtag': 'reed'
}
response = requests.post(
url, headers=headers,
json=payload, proxies=self.proxies
)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.post_msg_search()
# contains sample query for example
def get_msg_search(self):
logging.debug('MessageClient/get_msg_search()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token(),
'keyManagerToken': self.bot_client.get_sym_auth().get_key_manager_token()
}
url = self.config.data['agentHost']+'/agent/v1/message/search'
query = {
'query': 'hashtag:reed'
}
response = requests.get(url, headers=headers,
params=query, proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.get_msg_search()
def get_msg_status(self, msg_id):
logging.debug('MessageClient/get_msg_status()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token()
}
url = self.config.data['podHost'] + '/pod/v1/message/{0}/status'\
.format(msg_id)
response = requests.get(url, headers=headers, proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
return json.loads(response.text)
else:
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.get_msg_status(msg_id)
def get_supported_attachment_types(self):
logging.debug('MessageClient/getAttachmentTypes()')
headers = {
'sessionToken': self.bot_client.get_sym_auth().get_session_token()
}
url = self.config.data['podHost']+'/pod/v1/files/allowedTypes'
response = requests.get(url, headers=headers, proxies=self.proxies)
if response.status_code == 204:
result = []
return result
elif response.status_code == 200:
logging.debug('200')
return json.loads(response.text)
else:
logging.debug(response.status_code)
try:
super().handle_error(response, self.bot_client)
except UnauthorizedException:
return self.get_supported_attachment_types()
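# A minimal usage sketch (illustrative only; `bot_client` is whatever client object
# exposes get_sym_config()/get_sym_auth() as used above). send_msg() posts the payload
# as multipart form data, which is why it is passed via the `files=` argument:
#
#   msg_client = MessageClient(bot_client)
#   msg_client.send_msg(stream_id, {'message': '<messageML>Hello</messageML>'})
#   msg_client.send_msg_with_attachment(stream_id,
#       '<messageML>See attached</messageML>', 'report.csv', '/tmp/report.csv')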
|
import torch
import torch.nn as nn
from typing import Union, Sequence
from .backbone import MLP
class QNet(nn.Module):
"""
A simple network for DQN and its variants.
Parameters
----------
state_dim : int
Dimension of state space
action_dim : int
Dimension of action space
hidden_size : Sequence[int]
A list of sizes for all middle linear layers.
activation : Union[nn.Module, Sequence[nn.Module]], optional, default=nn.ReLU()
Activation function(s) after each layer. You can pass a single activation
function to be used for all layers, or a list of activation functions
for the different layers. ``None`` for no activation.
softmax : bool, optional, default=False
Apply a softmax over the last layer's output or not
dueling : bool, optional, default=False
Use dueling network or not (for Dueling DQN)
"""
def __init__(
self,
state_dim: int,
action_dim: int,
hidden_size: Sequence[int] = (256, 256, 256),
activation: Union[nn.Module, Sequence[nn.Module]] = nn.ReLU(),
softmax: bool = False,
dueling: bool = False
) -> None:
super(QNet, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.activation = activation
self.softmax = softmax
self.dueling = dueling
self.hidden_size = [state_dim] + list(hidden_size) + [action_dim]
self.make()
def make(self) -> None:
if self.dueling:
self.core = MLP(self.hidden_size[:-1], self.activation, activ_last_layer=True)
self.advantage = MLP([self.core.out_dim, self.action_dim], None)
self.value = MLP([self.core.out_dim, 1], None)
else:
self.core = MLP(self.hidden_size, self.activation)
def forward(self, x: torch.Tensor) -> torch.Tensor:
logits = self.core(x)
if self.dueling:
v, adv = self.value(logits), self.advantage(logits)
logits = v + adv - adv.mean(dim=1, keepdim=True)
if self.softmax:
logits = torch.softmax(logits, dim=-1)
return logits
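# A minimal usage sketch (not part of the original module; it assumes the MLP
# backbone behaves as used in make() above). With dueling=True, forward()
# combines the value and advantage heads as
# Q(s, a) = V(s) + A(s, a) - mean_a A(s, a).
if __name__ == "__main__":
    net = QNet(state_dim=4, action_dim=2, hidden_size=(64, 64), dueling=True)
    obs = torch.randn(8, 4)                  # batch of 8 states
    q_values = net(obs)                      # -> shape (8, 2)
    greedy_actions = q_values.argmax(dim=-1)
    print(q_values.shape, greedy_actions.shape)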
|
from django.test import TestCase
from ..core import CalculatedExtractionProduct
class TestCalculatedExtractionProduct(TestCase):
def test_should_create_list(self):
# given
ores = {"1": 100, "2": 200} # keys are string because they come from JSON
# when
lst = CalculatedExtractionProduct.create_list_from_dict(ores)
# then
self.assertListEqual(
lst,
[
CalculatedExtractionProduct(ore_type_id=1, volume=100),
CalculatedExtractionProduct(ore_type_id=2, volume=200),
],
)
|
import json
import sys
import uuid
import dcoscli
import docopt
import rollbar
import six
from concurrent.futures import ThreadPoolExecutor
from dcos import http, mesos, util
from dcoscli.constants import (ROLLBAR_SERVER_POST_KEY,
SEGMENT_IO_CLI_ERROR_EVENT,
SEGMENT_IO_CLI_EVENT, SEGMENT_IO_WRITE_KEY_PROD,
SEGMENT_URL)
from requests.auth import HTTPBasicAuth
logger = util.get_logger(__name__)
session_id = uuid.uuid4().hex
def wait_and_track(subproc):
"""
Run a command and report it to analytics services.
:param subproc: Subprocess to capture
:type subproc: Popen
:returns: exit code of subproc
:rtype: int
"""
rollbar.init(ROLLBAR_SERVER_POST_KEY, 'prod')
conf = util.get_config()
report = conf.get('core.reporting', True)
with ThreadPoolExecutor(max_workers=2) as pool:
if report:
_segment_track_cli(pool, conf)
exit_code, err = wait_and_capture(subproc)
# We only want to catch exceptions, not other stderr messages
# (such as "task does not exist", so we look for the 'Traceback'
# string. This only works for python, so we'll need to revisit
# this in the future when we support subcommands written in other
# languages.
if report and 'Traceback' in err:
_track_err(pool, exit_code, err, conf)
return exit_code
def wait_and_capture(subproc):
"""
Run a subprocess and capture its stderr.
:param subproc: Subprocess to capture
:type subproc: Popen
:returns: exit code of subproc and its captured stderr
:rtype: (int, str)
"""
err = ''
while subproc.poll() is None:
line = subproc.stderr.readline().decode('utf-8')
err += line
sys.stderr.write(line)
sys.stderr.flush()
exit_code = subproc.poll()
return exit_code, err
def _segment_track(event, conf, properties):
"""
Send a segment.io 'track' event
:param event: name of event
:type event: string
:param conf: dcos config file
:type conf: Toml
:param properties: event properties
:type properties: dict
:rtype: None
"""
data = {'event': event,
'properties': properties}
if 'core.email' in conf:
data['userId'] = conf['core.email']
else:
data['anonymousId'] = session_id
_segment_request('track', data)
def segment_identify(conf):
"""
Send a segment.io 'identify' event
:param conf: dcos config file
:type conf: Toml
:rtype: None
"""
if 'core.email' in conf:
data = {'userId': conf.get('core.email')}
_segment_request('identify', data)
def _segment_request(path, data):
"""
Send a segment.io HTTP request
:param path: URL path
:type path: str
:param data: json POST data
:type data: dict
:rtype: None
"""
key = SEGMENT_IO_WRITE_KEY_PROD
try:
# Set both the connect timeout and the request timeout to 1s,
# to prevent rollbar from hanging the CLI commands
http.post('{}/{}'.format(SEGMENT_URL, path),
json=data,
auth=HTTPBasicAuth(key, ''),
timeout=(1, 1))
except Exception as e:
logger.exception(e)
def _track_err(pool, exit_code, err, conf):
"""
Report error details to analytics services.
:param pool: thread pool
:type pool: ThreadPoolExecutor
:param exit_code: exit code of tracked process
:type exit_code: int
:param err: stderr of tracked process
:type err: str
:param conf: dcos config file
:type conf: Toml
:rtype: None
"""
# Segment.io calls are async, but rollbar is not, so for
# parallelism, we must call segment first.
_segment_track_err(pool, conf, err, exit_code)
_rollbar_track_err(conf, err, exit_code)
def _segment_track_cli(pool, conf):
"""
Send segment.io cli event.
:param pool: thread pool
:type pool: ThreadPoolExecutor
:param conf: dcos config file
:type conf: Toml
:rtype: None
"""
props = _base_properties(conf)
pool.submit(_segment_track, SEGMENT_IO_CLI_EVENT, conf, props)
def _segment_track_err(pool, conf, err, exit_code):
"""
Send segment.io error event.
:param pool: thread pool
:type pool: ThreadPoolExecutor
:param conf: dcos config file
:type conf: Toml
:param err: stderr of tracked process
:type err: str
:param exit_code: exit code of tracked process
:type exit_code: int
:rtype: None
"""
props = _base_properties(conf)
props['err'] = err
props['exit_code'] = exit_code
pool.submit(_segment_track, SEGMENT_IO_CLI_ERROR_EVENT, conf, props)
def _rollbar_track_err(conf, err, exit_code):
"""
Report to rollbar. Synchronous.
:param exit_code: exit code of tracked process
:type exit_code: int
:param err: stderr of tracked process
:type err: str
:param conf: dcos config file
:type conf: Toml
:rtype: None
"""
props = _base_properties(conf)
props['exit_code'] = exit_code
lines = err.split('\n')
if len(lines) >= 2:
title = lines[-2]
else:
title = err
props['stderr'] = err
try:
rollbar.report_message(title, 'error', extra_data=props)
except Exception as e:
logger.exception(e)
def _command():
""" Return the subcommand used in this dcos process.
:returns: subcommand used in this dcos process
:rtype: str
"""
# avoid circular import
import dcoscli.main
args = docopt.docopt(dcoscli.main._doc(),
help=False,
options_first=True)
return args['<command>']
def _base_properties(conf=None):
"""
These properties are sent with every analytics event.
:param conf: dcos config file
:type conf: Toml
:rtype: dict
"""
if not conf:
conf = util.get_config()
if len(sys.argv) > 1:
cmd = 'dcos ' + _command()
full_cmd = 'dcos ' + ' '.join(sys.argv[1:])
else:
cmd = 'dcos'
full_cmd = 'dcos'
try:
dcos_hostname = six.moves.urllib.parse.urlparse(
conf.get('core.dcos_url')).hostname
except:
logger.exception('Unable to find the hostname of the cluster.')
dcos_hostname = None
try:
cluster_id = mesos.DCOSClient().metadata().get('CLUSTER_ID')
except:
logger.exception('Unable to get the cluster_id of the cluster.')
cluster_id = None
return {
'cmd': cmd,
'full_cmd': full_cmd,
'dcoscli.version': dcoscli.version,
'python_version': str(sys.version_info),
'config': json.dumps(list(conf.property_items())),
'DCOS_HOSTNAME': dcos_hostname,
'CLUSTER_ID': cluster_id
}
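# A minimal usage sketch (hypothetical subcommand name; not part of this module).
# wait_and_track() expects a Popen whose stderr is a pipe so that
# wait_and_capture() can read tracebacks from it:
#
#   import subprocess
#   proc = subprocess.Popen(['dcos-subcommand', 'arg'], stderr=subprocess.PIPE)
#   sys.exit(wait_and_track(proc))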
|
# Generated by Django 3.2.8 on 2021-11-05 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sentirsebien', '0014_retroalimentacion'),
]
operations = [
migrations.RenameField(
model_name='retroalimentacion',
old_name='audio_url',
new_name='retro_audio_url',
),
migrations.RenameField(
model_name='retroalimentacion',
old_name='high',
new_name='retro_text',
),
migrations.RenameField(
model_name='retroalimentacion',
old_name='video_url',
new_name='retro_video_url',
),
migrations.RemoveField(
model_name='retroalimentacion',
name='low',
),
migrations.RemoveField(
model_name='retroalimentacion',
name='medium',
),
migrations.AddField(
model_name='retroalimentacion',
name='nivel',
field=models.CharField(blank=True, choices=[('low', 'Low'), ('medium', 'Medium'), ('high', 'High')], max_length=10),
),
]
|
import scadnano as sc
import modifications as mod
def create_design():
# helices
helices = [sc.Helix(max_offset=48), sc.Helix(max_offset=48)]
# left staple
stap_left_ss1 = sc.Domain(helix=1, forward=True, start=8, end=24)
stap_left_ss0 = sc.Domain(helix=0, forward=False, start=8, end=24)
stap_left = sc.Strand(domains=[stap_left_ss1, stap_left_ss0])
# right staple
stap_right_ss0 = sc.Domain(helix=0, forward=False, start=24, end=40)
stap_right_ss1 = sc.Domain(helix=1, forward=True, start=24, end=40)
stap_right = sc.Strand(domains=[stap_right_ss0, stap_right_ss1])
stap_right.set_modification_5p(mod.biotin_5p)
# scaffold
scaf_ss1_left = sc.Domain(helix=1, forward=False, start=8, end=24)
scaf_ss0 = sc.Domain(helix=0, forward=True, start=8, end=40)
loopout = sc.Loopout(length=3)
scaf_ss1_right = sc.Domain(helix=1, forward=False, start=24, end=40)
scaf = sc.Strand(domains=[scaf_ss1_left, scaf_ss0, loopout, scaf_ss1_right], is_scaffold=True)
# whole design
design = sc.Design(helices=helices, strands=[scaf, stap_left, stap_right], grid=sc.square)
# deletions and insertions added to design are added to both strands on a helix
design.add_deletion(helix=1, offset=20)
design.add_insertion(helix=0, offset=14, length=1)
design.add_insertion(helix=0, offset=26, length=2)
# assign_dna also assigns the complementary sequence to the other strands bound to scaf
design.assign_dna(scaf, 'AACGT' * 18)
return design
if __name__ == '__main__':
design = create_design()
design.write_scadnano_file(directory='output_designs')
|
import sys
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import torch
from torch.autograd import Variable
from dataset.dataset import *
from utility.utils import *
from model_History import *
from domains.gridworld_LP import *
from generators.obstacle_gen import *
def main(config,
n_domains=10,
max_obs=10,
max_obs_size=None,
n_traj=10,
n_actions=8):
# Correct vs total:
correct, total = 0.0, 0.0
# Automatically switch to GPU mode if available
use_GPU = torch.cuda.is_available()
# Instantiate a VIN model
vin = VIN(config)
# Load model parameters
vin.load_state_dict(torch.load(config.weights))
# Use GPU if available
if use_GPU:
vin = vin.cuda()
print('use GPU')
for dom in range(n_domains):
#"""Step1: Build map. Add obstacles, border and goal."""
goal = [np.random.randint(config.imsize), np.random.randint(config.imsize)]
# Generate obstacle map
obs = obstacles(domsize=[config.imsize, config.imsize], # domain size
mask=goal, # goal
size_max=3, # obstacle's max size
dom=None, # must be None
obs_types='rect', # obstacle's type, 'rect' only
num_types=1) # number of types, 1 only
# add random obstacles to dom,
n_obs = obs.add_n_rand_obs(n=max_obs)
# add border to dom,
border_res = obs.add_border()
# Step 2: Find the optimal path.
# get final map
im = obs.get_final()
# generate gridworld from obstacle map
G = gridworld(image=im,
n_dirc=8,
targetx=goal[0],
targety=goal[1],
turning_loss=0.053333,
p_sys=0.010225,
p_row=0.0002,
p_col=0.0001)
# solve LP problem
Lambda, Distance, startx, starty = LP(M=G, n_traj=n_traj)
# search optimal path
states_xy = []
n_solver_failure = 0
n_search_failure = 0
n_problem_infeasibly = 0
for i in range(0,n_traj):
if Lambda[i] == 'unsolved':
n_solver_failure = n_solver_failure + 1
elif Lambda[i] == 'unbounded':
n_problem_infeasibly = n_problem_infeasibly + 1
else:
states_xy_one_traj, search_failure = get_opt_path(Lambda=Lambda[i],
Distance=Distance[i],
M=G,
startx=startx[i],
starty=starty[i])
if states_xy_one_traj is None:
pass
else:
states_xy.append(states_xy_one_traj)
n_search_failure = n_search_failure + search_failure
# get all failure times
n_failure = n_solver_failure + n_problem_infeasibly + n_search_failure
# Get value prior
value_prior = G.t_get_reward_prior()
for i in range(n_traj - n_failure):
if len(states_xy[i]) > 1:
# Get number of steps to goal
L = len(states_xy[i]) * 2
# Allocate space for predicted steps
pred_traj = np.zeros((L, 2))
# Set starting position
pred_traj[0, :] = states_xy[i][0, :]
for j in range(1, L):
# Transform current state data
state_data = pred_traj[j - 1, :]
state_data = state_data.astype(np.int)
# Transform domain to the network's expected input shape
im_data = G.image.astype(np.int)
im_data = 1 - im_data
im_data = im_data.reshape(1, 1, config.imsize,
config.imsize)
# Transform value prior to the network's expected input shape
value_data = value_prior.astype(np.int)
value_data = value_data.reshape(1, 1, config.imsize,
config.imsize)
# Transform last state to the network's expected input shape
last_states = np.zeros((1, 1, config.imsize, config.imsize))
if j == 1:
last_states[0, 0 , state_data[0], state_data[1]] = 10
else:
last_states[0, 0 , np.int(pred_traj[j-2, 0]), np.int(pred_traj[j-2, 1])] = 10
# Get inputs as expected by network
X = torch.from_numpy(
np.concatenate((im_data, value_data, last_states), axis=1)).float()
S1_in = torch.from_numpy(state_data[0].reshape(
[1, 1])).float()
S2_in = torch.from_numpy(state_data[1].reshape(
[1, 1])).float()
X[:, 2, :, :] = X[:, 2, :, :]*-1
S_current = torch.zeros(config.batch_size, 1, config.imsize, config.imsize)
for k in range(0, config.batch_size):
S_current[k, 0, state_data[0], state_data[1]] = -10
if torch.equal(S_current[k, 0, :, :], X[k, 2, :, :]):
X[k, 2, :, :] = torch.zeros(1, 1, config.imsize, config.imsize)
X_in = torch.cat([X, S_current], 1)
# Send Tensors to GPU if available
if use_GPU:
X_in = X_in.cuda()
S1_in = S1_in.cuda()
S2_in = S2_in.cuda()
# Wrap to autograd.Variable
X_in, S1_in, S2_in = Variable(X_in), Variable(
S1_in), Variable(S2_in)
# Forward pass in our neural net
_, predictions, vv = vin(X_in, S1_in, S2_in, config)
v = (torch.squeeze(vv)).detach().cpu().numpy().T
_, indices = torch.max(predictions.cpu(), 1, keepdim=True)
a = indices.data.numpy()[0][0]
# r = visualize_reward(vin, X_in, S1_in, S2_in, config)
# plt.subplot(1, 2, 1)
# sns.heatmap(r, annot=False, square=True, vmin=-1, vmax=1)
# plt.plot(states_xy[i][-1, 0]+0.5, states_xy[i][-1, 1]+0.5, marker='o', markerfacecolor='blue', label='Goal')
# plt.plot(state_data[0]+0.5, state_data[1]+0.5, marker='o', markerfacecolor='orange', label='State')
# # plt.subplot(1, 2, 1)
# # sns.heatmap(v, annot=False, square=True, vmin=-1, vmax=1)
# # plt.plot(states_xy[i][-1, 0]+0.5, states_xy[i][-1, 1]+0.5, marker='o', markerfacecolor='blue', label='Goal')
# # plt.plot(state_data[0]+0.5, state_data[1]+0.5, marker='o', markerfacecolor='orange', label='State')
# plt.subplot(1, 2, 2)
# plt.imshow(G.image.T, cmap="Greys_r")
# plt.plot(states_xy[i][-1, 0], states_xy[i][-1, 1], marker='o', markerfacecolor='blue', label='Goal')
# plt.plot(state_data[0], state_data[1], marker='o', markerfacecolor='orange', label='State')
# plt.legend()
# plt.draw()
# plt.waitforbuttonpress(0)
# plt.close()
# Transform prediction to indices
s = G.map_ind_to_state(pred_traj[j - 1, 0],
pred_traj[j - 1, 1])
ns = G.sample_next_state(s, a)
nr, nc = G.get_coords_test(ns)
pred_traj[j, 0] = nr
pred_traj[j, 1] = nc
if nr == goal[0] and nc == goal[1]:
# We hit goal so fill remaining steps
pred_traj[j + 1:, 0] = nr
pred_traj[j + 1:, 1] = nc
break
# Plot optimal and predicted path (also start, end)
if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]:
correct += 1
total += 1
if config.plot:
visualize_test(G.image.T, states_xy[i], pred_traj)
sys.stdout.write("\r" + str(int(
(float(dom) / n_domains) * 100.0)) + "%")
sys.stdout.flush()
sys.stdout.write("\n")
print('Rollout Accuracy: {:.2f}%'.format(100 * (correct / total)))
def visualize_test(dom, states_xy, pred_traj):
fig, ax = plt.subplots()
implot = plt.imshow(dom, cmap="Greys_r")
ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path')
ax.plot(pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path')
ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start')
ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal')
legend = ax.legend(loc='upper right', shadow=False)
for label in legend.get_texts():
label.set_fontsize('x-small') # the legend text size
for label in legend.get_lines():
label.set_linewidth(0.5) # the legend line width
plt.draw()
plt.waitforbuttonpress(0)
plt.close(fig)
def visualize_reward(model, X_in, S1_in, S2_in, config):
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = (torch.squeeze(output)).detach().cpu().numpy().T
return hook
model.r.register_forward_hook(get_activation('r'))
output = model(X_in, S1_in, S2_in, config)
return activation['r']
if __name__ == '__main__':
# Parsing training parameters
parser = argparse.ArgumentParser()
parser.add_argument(
'--weights',
type=str,
default='trained/vin_16x16_LP_History_3_2021.pth',
help='Path to trained weights')
parser.add_argument('--plot', action='store_true', default=True)
parser.add_argument('--imsize', type=int, default=16, help='Size of image')
parser.add_argument(
'--k', type=int, default=20, help='Number of Value Iterations')
parser.add_argument(
'--l_i', type=int, default=4, help='Number of channels in input layer')
parser.add_argument(
'--l_h1',
type=int,
default=50,
help='Number of channels in 1st hidden layer')
# parser.add_argument(
# '--l_h2',
# type=int,
# default=50,
# help='Number of channels in 2nd hidden layer')
# parser.add_argument(
# '--l_h3',
# type=int,
# default=50,
# help='Number of channels in 3rd hidden layer')
parser.add_argument(
'--l_q',
type=int,
default=10,
help='Number of channels in q layer (~actions) in VI-module')
parser.add_argument(
'--batch_size', type=int, default=1, help='Batch size')
config = parser.parse_args()
# Compute Paths generated by network and plot
main(config)
|
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
import csv
import os
import sys
import traceback as tb
import uuid
import copy
from django.http import JsonResponse
from reo.src.load_profile import BuiltInProfile
from reo.models import URDBError
from reo.nested_inputs import nested_input_definitions
from reo.api import UUIDFilter
from reo.models import ModelManager
from reo.exceptions import UnexpectedError #, RequestError # should we save bad requests? could be sql injection attack?
import logging
log = logging.getLogger(__name__)
from reo.src.techs import Generator
from django.http import HttpResponse
from django.template import loader
# loading the labels of hard problems - doing it here so loading happens once on startup
hard_problems_csv = os.path.join('reo', 'hard_problems.csv')
hard_problem_labels = [i[0] for i in csv.reader(open(hard_problems_csv, 'r'))]
def make_error_resp(msg):
resp = dict()
resp['messages'] = {'error': msg}
resp['outputs'] = dict()
resp['outputs']['Scenario'] = dict()
resp['outputs']['Scenario']['status'] = 'error'
return resp
def errors(request, page_uuid):
template = loader.get_template("errors.html")
return HttpResponse(template.render())
def help(request):
try:
response = copy.deepcopy(nested_input_definitions)
return JsonResponse(response)
except Exception as e:
return JsonResponse({"Error": "Unexpected error in help endpoint: {}".format(e.args[0])})
def invalid_urdb(request):
try:
# invalid set is populated by the urdb validator, hard problems defined in csv
invalid_set = list(set([i.label for i in URDBError.objects.filter(type='Error')]))
return JsonResponse({"Invalid IDs": list(set(invalid_set + hard_problem_labels))})
except Exception as e:
return JsonResponse({"Error": "Unexpected error in invalid_urdb endpoint: {}".format(e.args[0])})
def annual_kwh(request):
try:
latitude = float(request.GET['latitude']) # need float to convert unicode
longitude = float(request.GET['longitude'])
doe_reference_name = request.GET['doe_reference_name']
if doe_reference_name not in BuiltInProfile.default_buildings:
raise ValueError("Invalid doe_reference_name. Select from the following: {}"
.format(BuiltInProfile.default_buildings))
if latitude > 90 or latitude < -90:
raise ValueError("latitude out of acceptable range (-90 <= latitude <= 90)")
if longitude > 180 or longitude < -180:
raise ValueError("longitude out of acceptable range (-180 <= longitude <= 180)")
uuidFilter = UUIDFilter('no_id')
log.addFilter(uuidFilter)
b = BuiltInProfile(latitude=latitude, longitude=longitude, doe_reference_name=doe_reference_name)
response = JsonResponse(
{'annual_kwh': b.annual_kwh,
'city': b.city},
)
return response
except KeyError as e:
return JsonResponse({"Error. Missing": str(e.args[0])})
except ValueError as e:
return JsonResponse({"Error": str(e.args[0])})
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
debug_msg = "exc_type: {}; exc_value: {}; exc_traceback: {}".format(exc_type, exc_value.args[0],
tb.format_tb(exc_traceback))
log.debug(debug_msg)
return JsonResponse({"Error": "Unexpected error in annual_kwh endpoint. Check log for more."})
def remove(request, run_uuid):
try:
ModelManager.remove(run_uuid) # ModelManager has some internal exception handling
return JsonResponse({"Success":True}, status=204)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value.args[0], tb.format_tb(exc_traceback), task='reo.views.results', run_uuid=run_uuid)
err.save_to_db()
resp = make_error_resp(err.message)
return JsonResponse(resp)
def results(request, run_uuid):
try:
uuid.UUID(run_uuid) # raises ValueError if not valid uuid
except ValueError as e:
if e.args[0] == "badly formed hexadecimal UUID string":
resp = make_error_resp(e.args[0])
return JsonResponse(resp, status=400)
else:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value.args[0], tb.format_tb(exc_traceback), task='results', run_uuid=run_uuid)
err.save_to_db()
return JsonResponse({"Error": str(err.args[0])}, status=400)
try:
d = ModelManager.make_response(run_uuid) # ModelManager has some internal exception handling
response = JsonResponse(d)
return response
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value.args[0], tb.format_tb(exc_traceback), task='reo.views.results', run_uuid=run_uuid)
err.save_to_db()
resp = make_error_resp(err.message)
return JsonResponse(resp)
def simulated_load(request):
try:
latitude = float(request.GET['latitude']) # need float to convert unicode
longitude = float(request.GET['longitude'])
doe_reference_name = request.GET['doe_reference_name']
try: # annual_kwh is optional. if not provided, then DOE reference value is used.
annual_kwh = float(request.GET['annual_kwh'])
except KeyError:
annual_kwh = None
try: # monthly_totals_kwh is optional. if not provided, then DOE reference value is used.
monthly_totals_kwh = float(request.GET['monthly_totals_kwh'])
except KeyError:
monthly_totals_kwh = None
if doe_reference_name not in BuiltInProfile.default_buildings:
raise ValueError("Invalid doe_reference_name. Select from the following: {}"
.format(BuiltInProfile.default_buildings))
if latitude > 90 or latitude < -90:
raise ValueError("latitude out of acceptable range (-90 <= latitude <= 90)")
if longitude > 180 or longitude < -180:
raise ValueError("longitude out of acceptable range (-180 <= longitude <= 180)")
b = BuiltInProfile(latitude=latitude, longitude=longitude, doe_reference_name=doe_reference_name,
annual_kwh=annual_kwh, monthly_totals_kwh=monthly_totals_kwh)
lp = b.built_in_profile
response = JsonResponse(
{'loads_kw': [round(ld, 3) for ld in lp],
'annual_kwh': b.annual_kwh,
'min_kw': round(min(lp), 3),
'mean_kw': round(sum(lp) / len(lp), 3),
'max_kw': round(max(lp), 3),
}
)
return response
except KeyError as e:
return JsonResponse({"Error. Missing": str(e.args[0])})
except ValueError as e:
return JsonResponse({"Error": str(e.args[0])})
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
debug_msg = "exc_type: {}; exc_value: {}; exc_traceback: {}".format(exc_type, exc_value.args[0],
tb.format_tb(exc_traceback))
log.error(debug_msg)
return JsonResponse({"Error": "Unexpected error in simulated_load endpoint. Check log for more."})
def generator_efficiency(request):
"""
From Navigant report / dieselfuelsupply.com, fitting a curve to the partial to full load points:
CAPACITY RANGE m [gal/kW] b [gal]
0 < C <= 40 kW 0.068 0.0125
40 < C <= 80 kW 0.066 0.0142
80 < C <= 150 kW 0.0644 0.0095
150 < C <= 250 kW 0.0648 0.0067
250 < C <= 750 kW 0.0656 0.0048
750 < C <= 1500 kW 0.0657 0.0043
1500 < C kW 0.0657 0.004
"""
try:
generator_kw = float(request.GET['generator_kw']) # need float to convert unicode
if generator_kw <= 0:
raise ValueError("Invalid generator_kw, must be greater than zero.")
m, b = Generator.default_fuel_burn_rate(generator_kw)
response = JsonResponse(
{'slope_gal_per_kwh': m,
'intercept_gal_per_hr': b,
}
)
return response
except ValueError as e:
return JsonResponse({"Error": str(e.args[0])})
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
debug_msg = "exc_type: {}; exc_value: {}; exc_traceback: {}".format(exc_type, exc_value.args[0],
tb.format_tb(exc_traceback))
log.debug(debug_msg)
return JsonResponse({"Error": "Unexpected error in generator_efficiency endpoint. Check log for more."})
|
import random
def scramble(input_file="autoAnnotated.jsonl"):
"""
This function scrambles the lines in a text document
(Can be removed since doccano already has a scrambler,
if doccano is run from a common server)
"""
with open(input_file, encoding='utf-8') as file:
lines = file.readlines()
random.shuffle(lines)
with open("autoAnnotated_scrambled.jsonl", 'w', encoding='utf-8') as f:
for line in lines:
f.write(line)
|
"""
Base class for FFC unit tests.
"""
from functools import wraps
from unittest import TestCase
from numpy import arange, prod
from numpy.random import randn, seed as random_seed
from pandas import date_range, Int64Index, DataFrame
from six import iteritems
from zipline.finance.trading import TradingEnvironment
from zipline.modelling.engine import SimpleFFCEngine
from zipline.modelling.graph import TermGraph
from zipline.utils.test_utils import make_simple_asset_info, ExplodingObject
from zipline.utils.tradingcalendar import trading_day
def with_defaults(**default_funcs):
"""
Decorator for providing dynamic default values for a method.
Usages:
@with_defaults(foo=lambda self: self.x + self.y)
def func(self, foo):
...
If a value is passed for `foo`, it will be used. Otherwise the function
supplied to `with_defaults` will be called with `self` as an argument.
"""
def decorator(f):
@wraps(f)
def method(self, *args, **kwargs):
for name, func in iteritems(default_funcs):
if name not in kwargs:
kwargs[name] = func(self)
return f(self, *args, **kwargs)
return method
return decorator
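# with_default_shape (defined just below) fills in shape=self.default_shape whenever
# the caller does not pass a shape; arange_data and randn_data rely on it.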
with_default_shape = with_defaults(shape=lambda self: self.default_shape)
class BaseFFCTestCase(TestCase):
def setUp(self):
self.__calendar = date_range('2014', '2015', freq=trading_day)
self.__assets = assets = Int64Index(arange(1, 20))
# Set up env for test
env = TradingEnvironment()
env.write_data(
equities_df=make_simple_asset_info(
assets,
self.__calendar[0],
self.__calendar[-1],
))
self.__finder = env.asset_finder
self.__mask = self.__finder.lifetimes(self.__calendar[-10:])
@property
def default_shape(self):
"""Default shape for methods that build test data."""
return self.__mask.shape
def run_terms(self, terms, initial_workspace, mask=None):
"""
Compute the given terms, seeding the workspace of our FFCEngine with
`initial_workspace`.
Parameters
----------
terms : dict
Mapping from termname -> term object.
initial_workspace : dict
Initial workspace values with which to seed the engine, keyed by term.
mask : optional
Lifetimes mask to use. Defaults to the mask built in setUp.
Returns
-------
results : dict
Mapping from termname -> computed result.
"""
engine = SimpleFFCEngine(
ExplodingObject(),
self.__calendar,
self.__finder,
)
mask = mask if mask is not None else self.__mask
return engine.compute_chunk(TermGraph(terms), mask, initial_workspace)
def build_mask(self, array):
ndates, nassets = array.shape
return DataFrame(
array,
# Use the **last** N dates rather than the first N so that we have
# space for lookbacks.
index=self.__calendar[-ndates:],
columns=self.__assets[:nassets],
dtype=bool,
)
@with_default_shape
def arange_data(self, shape, dtype=float):
"""
Build a block of testing data from numpy.arange.
"""
return arange(prod(shape), dtype=dtype).reshape(shape)
@with_default_shape
def randn_data(self, seed, shape):
"""
Build a block of testing data from numpy.random.randn.
"""
random_seed(seed)
return randn(*shape)
|
#!/usr/bin/env python
"""Tests for the Rekall profile server."""
import urllib2
import zlib
from rekall import constants
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import rekall_profile_server
from grr.lib import test_lib
from grr.lib import utils
# pylint: mode=test
class FakeHandle(object):
read_count = 0
def __init__(self, url):
# Convert the url back into a profile canonical name.
profile = url.split(constants.PROFILE_REPOSITORY_VERSION + "/")[1]
profile = profile.split(".")[0]
self.profile = profile
def read(self):
FakeHandle.read_count += 1
profile_name = self.profile
server = test_lib.TestRekallRepositoryProfileServer()
profile = server.GetProfileByName(
profile_name, version=constants.PROFILE_REPOSITORY_VERSION)
return profile.data
def FakeOpen(url, timeout=None): # pylint: disable=invalid-name
_ = timeout
return FakeHandle(url)
class ProfileServerTest(test_lib.GRRBaseTest):
def setUp(self):
self.server = rekall_profile_server.GRRRekallProfileServer()
super(ProfileServerTest, self).setUp()
def testProfileFetching(self):
profile_name = "nt/GUID/F8E2A8B5C9B74BF4A6E4A48F180099942"
FakeHandle.read_count = 0
with utils.Stubber(urllib2, "urlopen", FakeOpen):
profile = self.server.GetProfileByName(
profile_name,
version=constants.PROFILE_REPOSITORY_VERSION)
uncompressed = zlib.decompress(profile.data, 16 + zlib.MAX_WBITS)
self.assertTrue("BusQueryDeviceID" in uncompressed)
# We issued one http request.
self.assertEqual(FakeHandle.read_count, 1)
with utils.Stubber(urllib2, "urlopen", FakeOpen):
profile = self.server.GetProfileByName(
profile_name,
version=constants.PROFILE_REPOSITORY_VERSION)
# This time it should have been cached.
self.assertEqual(FakeHandle.read_count, 1)
def testGzExtension(self):
with utils.Stubber(urllib2, "urlopen", FakeOpen):
profile = self.server.GetProfileByName("pe")
# We received compressed data.
zlib.decompress(profile.data, 16 + zlib.MAX_WBITS)
# We issued one http request.
self.assertEqual(FakeHandle.read_count, 1)
self.server.GetProfileByName("pe")
# This time it should have been cached.
self.assertEqual(FakeHandle.read_count, 1)
self.server.GetProfileByName("pe")
# This is the same profile.
self.assertEqual(FakeHandle.read_count, 1)
cache_urn = rdfvalue.RDFURN(config_lib.CONFIG["Rekall.profile_cache_urn"])
cached_items = list(aff4.FACTORY.Open(
cache_urn.Add(constants.PROFILE_REPOSITORY_VERSION),
token=self.token).ListChildren())
# We cache the .gz only.
self.assertEqual(len(cached_items), 1)
self.assertEqual(cached_items[0].Basename(), "pe")
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="dispersion", # Replace with your own username
version="0.1.0-beta.5",
author="Phillip Manley",
author_email="phillip.manley@helmholtz-berlin.de",
description="support for libraries of optical dispersion (refractive index) data files",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nano-sippe/dispersion",
packages=find_packages(where="src"),
package_dir={"": "src"},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=['numpy', 'matplotlib', 'pandas', 'scipy', 'PyYAML'],
python_requires='>=3.6',
include_package_data=True,
entry_points={
'console_scripts': ['dispersion_setup='+
'dispersion.scripts.'+
'setup_dispersion:main',
'dispersion_catalogue_rebuild='+
'dispersion.scripts.'+
'catalogue_rebuild:main',],
}
)
#data_files=[('config',['cfg/config.yaml'])],
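# Usage sketch (assuming a standard pip install of this package):
#   pip install .
#   dispersion_setup               # console script from entry_points above
#   dispersion_catalogue_rebuild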
|
n = float(input('How much money do you have in your wallet?: '))
d = n / 3.27
print('With R${} you can buy US${:.2f}'.format(n, d))
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
from typing import Union
from pyrogram import raw
from pyrogram.raw.core import TLObject
EmojiKeyword = Union[raw.types.EmojiKeyword, raw.types.EmojiKeywordDeleted]
# noinspection PyRedeclaration
class EmojiKeyword: # type: ignore
"""This base type has 2 constructors available.
Constructors:
.. hlist::
:columns: 2
- :obj:`EmojiKeyword <pyrogram.raw.types.EmojiKeyword>`
- :obj:`EmojiKeywordDeleted <pyrogram.raw.types.EmojiKeywordDeleted>`
"""
QUALNAME = "pyrogram.raw.base.EmojiKeyword"
def __init__(self):
raise TypeError("Base types can only be used for type checking purposes: "
"you tried to use a base type instance as argument, "
"but you need to instantiate one of its constructors instead. "
"More info: https://docs.pyrogram.org/telegram/base/emoji-keyword")
|
import random
import time
# Binary Search algorithm
def naive_search(l, target):
# example: l = [1, 3, 5, 10, 12]
for i in range(len(l)):
if l[i] == target:
return i
return -1
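# Binary search assumes l is sorted: each step compares the target against the
# midpoint and discards half of the remaining range, giving O(log n) lookups
# versus the O(n) scan in naive_search above.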
def binary_search(l, target, low=None, high=None):
if low is None:
low = 0
if high is None:
high = len(l) - 1
if high < low:
return -1
midpoint = (low + high) // 2
if l[midpoint] == target:
return midpoint
elif target < l[midpoint]:
return binary_search(l, target, low, midpoint-1)
else:
return binary_search(l, target, midpoint+1, high)
if __name__ == '__main__':
# l = [1, 3, 5, 10, 12]
# target = 10
# print(naive_search(l, target))
# print(binary_search(l, target))
length = 10000
sorted_list = set()
while len(sorted_list) < length:
sorted_list.add(random.randint(-3*length, 3*length))
sorted_list = sorted(list(sorted_list))
start = time.time()
for target in sorted_list:
naive_search(sorted_list, target)
end = time.time()
print("Naive search time: ", (end - start)/length, "seconds")
start = time.time()
for target in sorted_list:
binary_search(sorted_list, target)
end = time.time()
print("Binary search time: ", (end - start)/length, "seconds")
|
"""
Dummy conftest.py for tdde13.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
- https://docs.pytest.org/en/stable/fixture.html
- https://docs.pytest.org/en/stable/writing_plugins.html
"""
# import pytest
|
#!/usr/bin/env python
# encoding: UTF-8
"""
NetEase Cloud Music (网易云音乐) API
https://github.com/bluetomlee/NetEase-MusicBox
The MIT License (MIT)
Copyright (c) 2014 vellow <i@vellow.net>
modified by
"""
import json
import requests
from PyQt5.QtCore import pyqtSignal, QObject
from constants import DATA_PATH
from base.common import singleton, func_coroutine, write_json_into_file
from base.logger import LOG
# de-duplicate a list while preserving order
def uniq(arr):
arr2 = list(set(arr))
arr2.sort(key=arr.index)
return arr2
"""
TODO: add local cache
"""
@singleton
class NetEase(QObject):
signal_load_progress = pyqtSignal([int])
cookies_filename = "netease_cookies.json"
def __init__(self):
super().__init__()
self.headers = {
'Host': 'music.163.com',
'Connection': 'keep-alive',
'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
'Referer': 'http://music.163.com/',
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36"
}
self.cookies = dict(appver="1.2.1", os="osx")
def show_progress(self, response):
content = bytes()
total_size = response.headers.get('content-length')
if total_size is None:
LOG.info(u'This response has no Content-Length header')
content = response.content
return content
else:
total_size = int(total_size)
bytes_so_far = 0
for chunk in response.iter_content():
content += chunk
bytes_so_far += len(chunk)
progress = round(bytes_so_far * 1.0 / total_size * 100)
self.signal_load_progress.emit(progress)
return content
def load_cookies(self):
try:
with open(DATA_PATH + self.cookies_filename) as f:
data_str = f.read()
self.cookies = json.loads(data_str)
except Exception as e:
LOG.error(str(e))
@func_coroutine
def save_cookies(self):
try:
write_json_into_file(self.cookies, DATA_PATH + self.cookies_filename)
LOG.info("Save cookies successfully")
except Exception as e:
LOG.error(str(e))
LOG.error("Save cookies failed")
def http_request(self, method, action, query=None, urlencoded=None, callback=None, timeout=1):
try:
res = None
if method == "GET":
res = requests.get(action, headers=self.headers, cookies=self.cookies, timeout=timeout)
elif method == "POST":
res = requests.post(action, query, headers=self.headers, cookies=self.cookies, timeout=timeout)
elif method == "POST_UPDATE":
res = requests.post(action, query, headers=self.headers, cookies=self.cookies, timeout=timeout)
self.cookies.update(res.cookies.get_dict())
self.save_cookies()
content = self.show_progress(res)
content_str = content.decode('utf-8')
content_dict = json.loads(content_str)
return content_dict
except Exception as e:
LOG.error(str(e))
return {"code": 408}
def login(self, username, pw_encrypt, phone=False):
action = 'http://music.163.com/api/login/'
phone_action = 'http://music.163.com/api/login/cellphone/'
data = {
'username': username,
'password': pw_encrypt,
'rememberLogin': 'true'
}
phone_data = {
'phone': username,
'password': pw_encrypt,
'rememberLogin': 'true'
}
if phone is True:
res_data = self.http_request("POST_UPDATE", phone_action, phone_data)
return res_data
else:
res_data = self.http_request("POST_UPDATE", action, data)
return res_data
def check_cookies(self):
url = "http://music.163.com/api/push/init"
data = self.http_request("POST_UPDATE", url, {})
if data['code'] == 200:
return True
return False
def confirm_captcha(self, captcha_id, text):
action = 'http://music.163.com/api/image/captcha/verify/hf?id=' + str(captcha_id) + '&captcha=' + str(text)
data = self.http_request('GET', action)
return data
def get_captcha_url(self, captcha_id):
action = 'http://music.163.com/captcha?id=' + str(captcha_id)
return action
# user playlists
def user_playlist(self, uid, offset=0, limit=100):
action = 'http://music.163.com/api/user/playlist/?offset=' + str(offset) + '&limit=' + str(
limit) + '&uid=' + str(uid)
res_data = self.http_request('GET', action)
return res_data
# search: songs(1), artists(100), albums(10), playlists(1000), users(1002) -- selected via stype
def search(self, s, stype=1, offset=0, total='true', limit=60):
action = 'http://music.163.com/api/search/get/web'
data = {
's': s,
'type': stype,
'offset': offset,
'total': total,
'limit': limit
}
return self.http_request('POST', action, data)
# playlist detail
def playlist_detail(self, playlist_id):
action = 'http://music.163.com/api/playlist/detail?id=' + str(playlist_id) + '&offset=0&total=true&limit=1001'
res_data = self.http_request('GET', action)
return res_data
# artist-related info
def artist_infos(self, artist_id):
"""
:param artist_id: artist_id
:return: {
code: int,
artist: {artist},
more: boolean,
hotSongs: [songs]
}
"""
action = 'http://music.163.com/api/artist/' + str(artist_id)
data = self.http_request('GET', action)
return data
# album id --> song id set
def album_infos(self, album_id):
"""
:param album_id:
:return: {
code: int,
album: { album }
}
"""
action = 'http://music.163.com/api/album/' + str(album_id)
data = self.http_request('GET', action)
return data
# song id --> song url ( details )
def song_detail(self, music_id):
action = "http://music.163.com/api/song/detail/?id=" + str(music_id) + "&ids=[" + str(music_id) + "]"
data = self.http_request('GET', action)
return data
# DJ channel (id, channel_name) ids --> song urls (details)
# normalize the channels into the songs format
def channel_detail(self, channelids, offset=0):
channels = []
for i in range(0, len(channelids)):
action = 'http://music.163.com/api/dj/program/detail?id=' + str(channelids[i])
data = self.http_request('GET', action)
try:
channel = self.dig_info(data['program']['mainSong'], 'channels')
channels.append(channel)
except:
continue
return channels
def addMusicToPlaylist(self, mid, pid, op):
"""
:param op: add or del
Add the song `mid` to the playlist `pid`.
1. If the song is already in the playlist, the API returns code 502.
"""
url_add = 'http://music.163.com/api/playlist/manipulate/tracks'
trackIds = '["' + str(mid) + '"]'
data_add = {
'tracks': str(mid), # music id
'pid': str(pid), # playlist id
'trackIds': trackIds, # music id str
'op': op # operation: add or del
}
return self.http_request('POST', url_add, data_add)
def set_music_favorite(self, mid, flag):
url = "http://music.163.com/api/song/like"
data = {
"trackId": mid,
"like": str(flag).lower(),
"time": 0
}
return self.http_request("POST", url, data)
def getRadioMusic(self):
url = 'http://music.163.com/api/radio/get'
return self.http_request('GET', url)
def get_mv_detail(self, mvid):
"""Get mv detail
:param mvid: mv id
:return:
"""
url = 'http://music.163.com/api/mv/detail?id=' + str(mvid)
return self.http_request('GET', url)
def get_lyric_by_musicid(self, mid):
"""Get song lyric
:param mid: music id
:return: {
lrc: {
version: int,
lyric: str
},
tlyric: {
version: int,
lyric: str
}
sgc: bool,
qfy: bool,
sfy: bool,
transUser: {},
code: int,
}
"""
# tv controls translation: -1 requests the translated lyric, 1 does not
url = 'http://music.163.com/api/song/lyric?' + 'id=' + str(mid) + '&lv=1&kv=1&tv=-1'
return self.http_request('GET', url)
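# A minimal usage sketch (illustrative only; the response layout follows the
# unofficial NetEase web API and is an assumption here):
#
#   netease = NetEase()          # @singleton returns the shared instance
#   resp = netease.search('someone', stype=1)
#   if resp.get('code') == 200:
#       songs = resp.get('result', {}).get('songs', [])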
|
import doctest
import pytest
from insights.parsers import engine_db_query, ParseException, SkipException
from insights.tests import context_wrap
OUTPUT = """
{
"id_host": "None",
"when": "2020-06-21 12:45:59",
"time": "0.00263094902039",
"name": "None",
"description": "None",
"type": "None",
"kb": "None",
"bugzilla": "None",
"file": "",
"path": "None",
"id": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"hash": "d41d8cd98f00b204e9800998ecf8427e",
"result": [
{
"vds_name": "hosto",
"rpm_version": "vdsm-4.30.40-1.el7ev"
}
]
}
""".strip()
OUTPUT_2 = """
{
"id_host": "None",
"when": "2020-06-21 12:45:59",
"time": "0.00263094902039",
"name": "None",
"description": "None",
"type": "None",
"kb": "None",
"bugzilla": "None",
"file": "",
"path": "None",
"id": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"hash": "d41d8cd98f00b204e9800998ecf8427e",
"result": [
{
"vds_name": "hosto",
"rpm_version": "vdsm-4.40.20-33.git1b7dedcf3.fc30"
},
{
"vds_name": "hosto2",
"rpm_version": "vdsm-4.40.13-38.gite9bae3c68.fc30"
}
]
}
""".strip()
ERROR = """
SELECT
row_to_json(t)
FROM
(SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;) t
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 337, in query_return_json
query
SyntaxError: syntax error at or near ";"
LINE 5: ...ds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;) t
^
Traceback (most recent call last):
File "/usr/bin/engine-db-query", line 281, in <module>
sys.exit(main())
File "/usr/bin/engine-db-query", line 273, in main
knowledge_base=args.kb_url
File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 213, in execute
knowledge_base=knowledge_base
File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 348, in query_return_json
ret = cursor.fetchall()
psycopg2.ProgrammingError: no results to fetch
""".strip()
def test_edbq():
output = engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT))
assert output.get('id', None) == 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
assert output.result[0].get('rpm_version') == 'vdsm-4.30.40-1.el7ev'
# for multiple hosts
output = engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT_2))
assert output.result == [{'vds_name': 'hosto', 'rpm_version': 'vdsm-4.40.20-33.git1b7dedcf3.fc30'}, {'vds_name': 'hosto2', 'rpm_version': 'vdsm-4.40.13-38.gite9bae3c68.fc30'}]
# No content
with pytest.raises(SkipException) as e:
engine_db_query.EngineDBQueryVDSMversion(context_wrap(""))
assert "Empty output." in str(e)
# Error
with pytest.raises(ParseException) as e:
engine_db_query.EngineDBQueryVDSMversion(context_wrap(ERROR))
assert "couldn't parse json." in str(e)
def test_doc_examples():
env = {
'output': engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT))
}
failed, total = doctest.testmod(engine_db_query, globs=env)
assert failed == 0
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
"""Fix Bound Joint
v0.1.0
Description
-----------
Fixes bound joints to the correct worldOrientation in connected skinClusters
Usage
-----
Select a joint which is an influence for a skinCluster, open up the script UI,
select "freeze transformations" if you want to zero rotation values, select
"Apply to children" if you want to apply the same fix to child joints...
Change Log
----------
0.1.0
-----
- Moved the script to Python
- It also fixes any joint with input connection, thus it can fix joints which
are in character set or have an input connection
"""
__version__ = "0.1.0"
import pymel.core as pm
def UI():
"""The UI of the script
"""
window_width = 153
window_height = 80
window_name = "oyFixBoundJoint_Window"
if pm.window(window_name, ex=True):
pm.deleteUI(window_name, window=True)
window = pm.window(
window_name,
tlb=True,
title="fixBoundJoint " + __version__,
widthHeight=(window_width, window_height)
)
pm.columnLayout("FBJ_columnLayout1", adj=True)
pm.checkBox(
"FBJ_checkBox1",
l="Freeze transformations",
al="left",
v=1
)
pm.checkBox(
"FBJ_checkBox2",
l="Apply to children",
al="left"
)
pm.button(
"FBJ_button1",
l="Apply",
c=get_check_box_states_and_run
)
pm.setParent()
window.show()
window.setWidthHeight(val=(window_width, window_height))
def get_check_box_states_and_run(*args, **kwargs):
"""Gets the data from UI and runs the script
"""
freeze = pm.checkBox("FBJ_checkBox1", q=True, v=True)
apply_to_children = pm.checkBox("FBJ_checkBox2", q=True, v=True)
selection_list = pm.ls(sl=1, type="joint")
do_fix(selection_list, freeze, apply_to_children)
pm.select(selection_list)
def do_fix(joints, freeze=True, apply_to_children=False):
"""Fixes the given list of bound joints by copying the current worldMatrix
information to the related skinClusters.
    :param joints: the list of bound joints to fix
    :param freeze: if True (the default), the rotation values of the joint are
      also set to (0, 0, 0)
    :param apply_to_children: if True, the operation is also applied to the
      children of the given joints
    """
new_selection_list = joints
if apply_to_children:
pm.select(joints, hi=True)
new_selection_list = pm.ls(sl=1, type="joint")
for joint in new_selection_list:
connections = joint.worldMatrix.outputs(
c=1,
p=1,
t="skinCluster",
et=True
)
if freeze:
freeze_joint(joint)
matrix = joint.worldInverseMatrix.get()
for attribute_data in connections:
skinCluster_attribute = attribute_data[1]
skinCluster_node = skinCluster_attribute.node()
index = skinCluster_attribute.index()
skinCluster_node.bindPreMatrix[index].set(matrix)
def freeze_joint(joint):
"""Freezes the given joint by duplicating it and applying the freeze to the
duplicate and then copy the joint orientation values to the original joint.
    :param joint: the joint to be frozen
"""
dup_joint = pm.duplicate(joint, rc=1)[0]
# if the duplicate has any children delete them
pm.delete(dup_joint.getChildren())
# unlock rotate channels
dup_joint.rotateX.unlock()
dup_joint.rotateY.unlock()
dup_joint.rotateZ.unlock()
# freeze the joint
pm.makeIdentity(dup_joint, apply=1, r=1)
# set rotation to zero
if not joint.rotateX.isLocked():
joint.rotateX.set(0)
else:
# unlock and lock it again
joint.rotateX.unlock()
joint.rotateX.set(0)
joint.rotateX.lock()
if not joint.rotateY.isLocked():
joint.rotateY.set(0)
else:
# unlock and lock it again
joint.rotateY.unlock()
joint.rotateY.set(0)
joint.rotateY.lock()
if not joint.rotateZ.isLocked():
joint.rotateZ.set(0)
else:
# unlock and lock it again
joint.rotateZ.unlock()
joint.rotateZ.set(0)
joint.rotateZ.lock()
# get the joint orient
joint.jointOrient.set(dup_joint.jointOrient.get())
# delete the duplicate joint
pm.delete(dup_joint)
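# A minimal, illustrative usage sketch (assumes a running Maya session with pymel;
# "joint1" is a hypothetical joint that influences a skinCluster):
#
#   pm.select("joint1")
#   do_fix(pm.ls(sl=1, type="joint"), freeze=True, apply_to_children=True)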
|
# -*- coding: utf-8 -*-
import xlrd
from collections import OrderedDict
import simplejson as json
import sys
import traceback
import os
import pwd
import grp
import shutil
dbginfo = 1
dbgdetail = 2
dbg = os.environ.get("DBG")
confdir=os.environ.get("CONFDIR")
gitrepodir=os.environ.get("GITREPODIR")
homedir=os.environ.get("HOME")
user=os.environ.get("USER")
vpcnetfile=os.environ.get("vpcnetfile")
aclfile=os.environ.get("aclfile")
hostsfile=os.environ.get("hostsfile")
eipfile=os.environ.get("eipfile")
sfjpara=os.environ.get("sfjpara")
tfcmd=os.environ.get("tfcmd")
parafile="ParamaterSeet.json"
appconffile="appconf.json"
zbxserv=os.environ.get("ZBXHOST")
def sfjmakej2p(sysname,parafile,dbg=0):
try:
currentdir = os.getcwd()
varsdir="%s/%s" % (gitrepodir,sysname)
varsfile="%s/%s" % (varsdir,sfjpara)
if os.path.exists(varsdir) is False:
os.makedirs(varsdir)
appfile="%s/%s" % (confdir,appconffile)
appdata = OrderedDict()
with open(appfile, 'r') as f:
appdata = json.load(f)
f.close()
parafile="%s/%s/%s_%s" % (gitrepodir,sysname,sysname,parafile)
paradata = OrderedDict()
with open(parafile, 'r') as f:
paradata = json.load(f)
f.close()
if dbg >= dbgdetail:
print json.dumps(paradata, indent=2)
#
if sysname != paradata['system']['0']['sysname']:
print "Different SystemName : %s : %s" % sysname,paradata['system'][0]['sysname']
exit( 1 )
        # Create the parameter file (sfjpara.tfvars)
f = open(varsfile, "w")
for cloudno in paradata['cloud']:
pemfile="%s/%s" % (confdir,paradata['cloud'][cloudno]['privatekeyfile'])
gitpemfile="%s/%s/%s" % (gitrepodir,sysname,paradata['cloud'][cloudno]['privatekeyfile'])
sfjuser=paradata['cloud'][cloudno]['privatekeyfile'].replace(".pem","" )
accountid="%s" % (paradata['cloud'][cloudno]['accountid'])
f.write( "aws_access_key=\"%s\"\n" % paradata['cloud'][cloudno]['accesskey'] )
f.write( "aws_secret_key=\"%s\"\n" % paradata['cloud'][cloudno]['secretkey'] )
f.write( "reagion=\"%s\"\n" % paradata['cloud'][cloudno]['reagion'] )
f.write( "ansible_ssh_user=\"%s\"\n" % accountid )
f.write( "privatekeyfile=\"%s\"\n" % paradata['cloud'][cloudno]['privatekeyfile'] )
f.write( "private_key_name=\"%s\"\n" % sfjuser)
f.write( "\n" )
f.write( "network0=\"%s\"\n" % paradata['network']['0']['network'] )
f.write( "netmask0=\"%d\"\n" % int(paradata['network']['0']['netmask']) )
f.write( "\n" )
for hostno in paradata['hosts']:
no = int(hostno) + 1
f.write( "hostname=\"%s\"\n" % paradata['hosts'][hostno]['hostname'] )
f.write( "instance_type%d=\"%s\"\n" % (no,paradata['hosts'][hostno]['vmtype']) )
f.write( "hostip%d=\"%s\"\n" % (no,paradata['hosts'][hostno]['hostip']) )
f.write( "amitype%d=\"%s\"\n" % (no,paradata['hosts'][hostno]['ostype']) )
f.write( "\n" )
f.close()
        # Copy the pem file
shutil.copy(pemfile, varsdir)
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(user).gr_gid
os.chown(gitpemfile, uid, gid)
os.chmod(gitpemfile,0600)
        # Copy the vpcnet.tf file
f = open(vpcnetfile, "r")
vpcfiledata = f.read()
f.close()
tagname="%s" % ( sysname )
vpcdata = vpcfiledata.replace("SFJTAGS",tagname)
dirs, files = os.path.split(vpcnetfile)
vpcfile="%s/%s" % (varsdir,files)
f = open(vpcfile, "w")
f.write(vpcdata)
f.close()
        # Copy the acl.tf file
f = open(aclfile, "r")
aclfiledata = f.read()
f.close()
tagname="%s" % ( sysname )
acldata = aclfiledata.replace("SFJTAGS",tagname)
dirs, files = os.path.split(aclfile)
aclnewfile="%s/%s" % (varsdir,files)
f = open(aclnewfile, "w")
f.write(acldata)
f.close()
        # Copy and create the hostN.tf files
f = open(hostsfile, "r")
hostsfiledata = f.read()
f.close()
f = open(eipfile, "r")
        eipfiledata = f.read()
f.close()
for hostno in paradata['hosts']:
no = int(hostno) + 1
            # Replace SFJPARA with the host number (no)
hostsdata = hostsfiledata.replace("SFJPARA",str(no))
tagname="%s" % ( sysname )
hostsdata = hostsdata.replace("SFJTAGS",tagname)
tagname="%s-%s" % ( sysname, paradata['hosts'][hostno]['hostname'] )
hostsdata = hostsdata.replace("SFJTAGHOST",tagname)
hostfile="%s/host%d.tf" % (varsdir,no)
f = open(hostfile, "w")
f.write(hostsdata)
eipflag=paradata['hosts'][hostno]['gip'].encode('utf-8')
            if eipflag == '有' :  # '有' means "yes": assign an Elastic IP
tagname="%s" % ( sysname )
                eipdata = eipfiledata.replace("SFJPARA",str(no))
eipdata = eipdata.replace("SFJTAGS",tagname)
f.write(eipdata)
f.close()
sshdir="%s/.ssh" % (homedir)
if os.path.exists(sshdir) is False:
os.makedirs(sshdir)
sfile="%s/config" % (sshdir)
sf = open(sfile, 'w')
gwname=paradata['hosts']['0']['hostname'].encode('utf-8')
for hostno in paradata['hosts']:
            if paradata['hosts'][hostno]['role'].encode('utf-8') == "踏み台":  # "踏み台" = bastion (jump) host
gwname=paradata['hosts'][hostno]['hostname'].encode('utf-8')
sf.write("host %s\n" % gwname)
sf.write(" HostName PUBIP\n")
sf.write(" User %s\n" % accountid )
sf.write(" StrictHostKeyChecking no\n")
sf.write(" IdentityFile %s\n" % gitpemfile )
sf.write("\n" )
for hostno in paradata['hosts']:
sf.write("Host %s-%s\n" % (sysname,paradata['hosts'][hostno]['hostname']) )
sf.write(" HostName %s\n" % paradata['hosts'][hostno]['hostip'] )
sf.write(" User %s\n" % accountid )
sf.write(" StrictHostKeyChecking no\n")
sf.write(" IdentityFile %s\n" % gitpemfile )
sf.write(" ProxyCommand ssh -W %%h:%%p %s\n" % gwname)
sf.write("\n" )
sf.close()
        # Create the hosts and yml files
for hostno in paradata['hosts']:
monflag=0
            # Create the yml file
yfile="%s/%s/%s-%s.yml" % (gitrepodir,sysname,sysname,paradata['hosts'][hostno]['hostname'])
yf = open(yfile, 'w')
yf.write("- hosts: appl-%s\n" % paradata['hosts'][hostno]['hostname'])
yf.write(" become: yes\n")
yf.write(" roles:\n")
for applno in appdata['rolelist']:
appl=appdata['rolelist'][applno]['appname'].encode('utf-8')
roleinfo=appdata['rolelist'][applno]['roleinfo'].encode('utf-8')
monitor=paradata['hosts'][hostno]['monitorname']
if monitor != '' and appl == 'ZabbixAgent' :
yf.write(" - role: %s\n" % roleinfo )
monflag=1
elif roleinfo != '' :
item=paradata['hosts'][hostno].get(appl,"None")
if item != 'None' :
item=item.encode('utf-8')
if item == '有' :
yf.write(" - role: %s\n" % roleinfo )
yf.write("\n" )
if monflag == 1:
yf.write("- hosts: appl-zbx\n")
yf.write(" connection: local\n")
yf.write(" become: yes\n")
yf.write(" roles:\n")
yf.write(" - role: RgistAgent\n")
yf.write("\n" )
yf.close()
            # Create the hosts file
hfile="%s/%s/%s-%s.host" % (gitrepodir,sysname,sysname,paradata['hosts'][hostno]['hostname'])
hf = open(hfile, 'w')
hf.write("[appl-%s]\n" % paradata['hosts'][hostno]['hostname'])
hf.write("%s-%s\n" % (sysname,paradata['hosts'][hostno]['hostname']) )
hf.write("\n")
hf.write("[appl-%s:vars]\n" % paradata['hosts'][hostno]['hostname'])
hf.write("ansible_ssh_user=%s\n" % paradata['cloud']['0']['accountid'])
hf.write("ansible_ssh_private_key_file=%s\n" % gitpemfile )
hf.write("ansible_server_ip=%s\n" % zbxserv )
hf.write("\n")
if monflag == 1:
hf.write("[appl-zbx]\n")
hf.write("localhost\n")
hf.write("\n")
hf.close()
os.chdir(currentdir)
exit( 0 )
except Exception as e:
os.chdir(currentdir)
        print '=== Error details ==='
print 'type:' + str(type(e))
print traceback.format_exc()
if __name__ == "__main__":
argv = sys.argv
argc = len(argv)
if argc <= 1:
print "NEED PROJECTNAME"
exit(1)
sysname=argv[1]
if argc > 2:
parafile=argv[2]
if argc > 3:
dbg=int(argv[3])
sfjmakej2p(argv[1],parafile,dbg)
###############################################################
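# Illustrative invocation sketch (the script file name and all paths are assumptions;
# configuration is read from the environment variables declared at the top of this script):
#
#   CONFDIR=/opt/sfj/conf GITREPODIR=/opt/sfj/repos \
#   vpcnetfile=/opt/sfj/tf/vpcnet.tf aclfile=/opt/sfj/tf/acl.tf \
#   hostsfile=/opt/sfj/tf/host.tf eipfile=/opt/sfj/tf/eip.tf \
#   sfjpara=sfjpara.tfvars ZBXHOST=zbx.example.com \
#   python2 sfjmakej2p.py PROJECTNAME ParamaterSeet.json 1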
|
from tests.pages.components.page import Page
from tests.parameters.locators import locator
class YnetHomePage(Page):
def c3_Hor(self):
return self.find_all(locator.c3_Hor)
def evritiframe_1(self):
return self.find_all(locator.evritiframe_1)
def multiarticles_15(self):
return self.find_all(locator.multiarticles_15)
def multiarticles_5(self):
return self.find_all(locator.multiarticles_5)
def close_console(self):
return self.find_all(locator.close_console)
def su_iframe(self):
return self.find_all(locator.su_iframe)
def xButtn(self):
return self.find_all(locator.xButtn)
def console_resize(self):
return self.find_all(locator.console_resize)
def first_title(self):
return self.find_all(locator.first_title)
def arrows(self):
return self.find_all(locator.arrows)
def mainSearchSelectText(self):
return self.find_all(locator.mainSearchSelectText)
def teaserxnet_1(self):
return self.find_all(locator.teaserxnet_1)
def iframe_container(self):
return self.find_all(locator.iframe_container)
def null(self):
return self.find_all(locator.null)
def ads_300x250_4(self):
return self.find_all(locator.ads_300x250_4)
|
import pandas as pd
import csv
import math
import datetime as dt
import os
import sys
import statistics
import matplotlib.pyplot as plt
from sko.GA import GA
def selecting_buildings_EP(path_test):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
dict_EPWs = {}
dict_EPWs['FMYs']=['USA_Salt Lake City Intl AP_HadCM3-A2-'+str(2080),'USA_Salt Lake City Intl AP_HadCM3-A2-'+str(2050)]
dict_EPWs['TMYs']=['USA_UT_Salt.Lake.City.Intl.AP.725720_TMY','USA_UT_Salt.Lake.City.725720_TMY2','USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3']
weather_file= dict_EPWs['TMYs'][-1]
year=2019
city=editable_data['city']
lat = float(editable_data['Latitude'])
lon = float(editable_data['Longitude'])
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (8,6)
path_test = os.path.join(sys.path[0])
energy_demands_path = os.path.join(path_test,'Buildings Energy Demands')
prototype_energyplus_path = os.path.join(path_test,'IDFBuildingsFiles')
#energyplus_buildings = ['ApartmentHighRise', 'ApartmentMidRise','Hospital','HotelLarge','HotelSmall','OfficeLarge','OfficeMedium','OfficeSmall','OutPatientHealthCare','RestaurantFastFood','RestaurantSitDown','RetailStandAlone','RetailStripMall','SchoolPrimary','SchoolSecondary','Warehouse']
#energyplus_buildings = ['OfficeLarge','OfficeMedium','OfficeSmall','OutPatientHealthCare','SchoolPrimary','SchoolSecondary','Warehouse']
#energyplus_buildings = ['OfficeMedium','OutPatientHealthCare','RetailStandAlone','SchoolSecondary']
energyplus_buildings = ['OfficeMedium','OfficeMedium','Hospital','RetailStandAlone']
thermal_eff_dict = {'OfficeMedium':0.8,'Hospital':0.8125,'RetailStandAlone':0.82}
energyplus_buildings_rev = []
UoU_final = [ 'SFEBB', 'HEB','EIHG', 'SLC'] #in 2019
JtokWh = 0.27777777777778/10**6
dict_UoU = {}
dict_energyplus = {}
for UoU in UoU_final:
dict_UoU[UoU] = pd.read_csv(os.path.join(energy_demands_path,UoU+'_processed.csv'),index_col=0)
#plt.plot(dict_UoU[UoU]['Electricity (kWh)'], label=UoU)
plt.plot(dict_UoU[UoU]['Heating (kWh)'], label=UoU)
num = 1
for energyplus in energyplus_buildings:
dict_energyplus[energyplus+'_'+str(num)] = pd.read_csv(os.path.join(prototype_energyplus_path ,'ASHRAE901_'+energyplus+'_STD2019_Denver_'+weather_file+'_mtr.csv'),index_col=0)*JtokWh
energyplus_buildings_rev.append(energyplus+'_'+str(num))
#Gas:Facility[J](Hourly) and Heating:Electricity [J](Hourly)
#print(dict_energyplus[energyplus+'_'+str(num)].keys())
if 'Electricity:Facility [J](Hourly)' in dict_energyplus[energyplus+'_'+str(num)].keys():
dict_energyplus[energyplus+'_'+str(num)]['Electricity kWh'] = dict_energyplus[energyplus+'_'+str(num)]['Electricity:Facility [J](Hourly)'] - dict_energyplus[energyplus+'_'+str(num)]['Heating:Electricity [J](Hourly)']
        #plt.plot(dict_energyplus[energyplus+'_'+str(num)]['Electricity kWh'], label=[energyplus+'_'+str(num)])
if 'Gas:Facility [J](Hourly)' in dict_energyplus[energyplus+'_'+str(num)].keys() and 'Heating:Electricity [J](Hourly)' in dict_energyplus[energyplus+'_'+str(num)].keys():
dict_energyplus[energyplus+'_'+str(num)]['Heating kWh'] = dict_energyplus[energyplus+'_'+str(num)]['Gas:Facility [J](Hourly)']*thermal_eff_dict[energyplus] + dict_energyplus[energyplus+'_'+str(num)]['Heating:Electricity [J](Hourly)']
plt.plot(dict_energyplus[energyplus+'_'+str(num)]['Heating kWh'], label=[energyplus+'_'+str(num)])
print(energyplus, sum(dict_energyplus[energyplus+'_'+str(num)]['Electricity kWh'])/1000,sum(dict_energyplus[energyplus+'_'+str(num)]['Heating kWh'])/1000 )
num = num + 1
plt.xticks(list(range(1,8760,730)),list(range(1,13)))
plt.legend()
#plt.savefig(os.path.join(path_test,'Electricity_total'+'.png'),dpi=300,facecolor='w')
plt.savefig(os.path.join(path_test,'Heating_total'+'.png'),dpi=300,facecolor='w')
plt.close()
getting_WFs = 'yes'
if getting_WFs == 'yes':
best_x=[]
best_y=[]
for i in range(4):
def Obj_GA(p):
weight_factor= p
MSE_elect =((dict_energyplus[energyplus_buildings_rev[i]]['Electricity kWh']*weight_factor).array - dict_UoU[UoU_final[i]]['Electricity (kWh)'].array)**2
MSE_heating = ((dict_energyplus[energyplus_buildings_rev[i]]['Heating kWh']*weight_factor).array - dict_UoU[UoU_final[i]]['Heating (kWh)'].array)**2
return (sum(MSE_elect)+sum(MSE_heating))/(8760*2)
#print(energyplus_buildings[i],UoU_final[i])
GA_model = GA(func=Obj_GA, n_dim=1, size_pop=50, max_iter=800, lb=[0],)
results = GA_model.run()
best_x.append(results[0])
best_y.append(results[1])
print(results[0],results[1])
print('weight_factor',best_x)
print('MSE',best_y)
weight_factor_2004 = [0.50558832,1.0,0.23360898,1.0]
RSME_WFs_2004 = [4927.3237312**0.5,7279.71216085**0.5,29176.72420875**0.5,4590.48390218**0.5] #kWh
weight_factor = [0.50558832,1.0,0.35786005,1.0]
RSME_WFs = [4894.322211714833**0.5,9010.63054282**0.5,27487.01264646**0.5,6030.52241506**0.5] #kWh
mean_error_WFs = []
for i in range(4):
mean_error_WFs.append(RSME_WFs[i]/statistics.mean(dict_energyplus[energyplus_buildings_rev[i]]['Electricity kWh']+dict_energyplus[energyplus_buildings_rev[i]]['Heating kWh']))
dict_energyplus[energyplus_buildings_rev[i]]['Electricity kWh']=dict_energyplus[energyplus_buildings_rev[i]]['Electricity kWh']*weight_factor[i]
dict_energyplus[energyplus_buildings_rev[i]]['Heating kWh']=dict_energyplus[energyplus_buildings_rev[i]]['Heating kWh']*weight_factor[i]
print(mean_error_WFs)
return dict_energyplus
path_test = os.path.join(sys.path[0])
selecting_buildings_EP(path_test)
|
"""
Temporal NDL
For reference see
Temporal Planning with Clock-Based SMT Encodings
Jussi Rintanen
Proceedings of the 26th Int'l Joint Conference on Artificial Intelligence (IJCAI)
2017
"""
from ..syntax import Atom, CompoundTerm, CompoundFormula, Constant, symref, Connective, Tautology
from ..model import Model
class NDLSyntaxError(Exception):
pass
class UnsupportedFeature(Exception):
pass
class SemanticError(Exception):
pass
class ResourceLock:
def __init__(self, **kwargs):
self.ts = kwargs['ts']
self.td = kwargs['td']
self.r = kwargs['r']
if not isinstance(self.r, CompoundTerm):
raise NDLSyntaxError("NDL Syntactic Error: resource lock needs to be a term (given: {})".format(self.r))
def __str__(self):
return "LOCK {} AFTER {} FOR {}".format(self.r, self.ts, self.td)
class ResourceLevel:
def __init__(self, **kwargs):
self.ts = kwargs['ts']
self.td = kwargs['td']
self.r = kwargs['r']
if not isinstance(self.r, CompoundTerm):
raise NDLSyntaxError("NDL Syntactic Error: resource lock must refer to term (given: {})".format(self.r))
self.n = kwargs['n']
if not isinstance(self.n, Constant):
raise NDLSyntaxError("NDL Syntactic Error: resource level must be a constant (given: {}".format(self.n))
if self.n.sort != self.r.sort:
raise NDLSyntaxError(
"NDL Type Mismatch: resource and level have different sorts (resource is: {}, level is: {}".format(
self.r.sort, self.n.sort))
def __str__(self):
return "LOCK {} AFTER {} FOR {}".format(self.r, self.ts, self.td)
class SetLiteralEffect(object):
"""
Set literal truth value
"""
def __init__(self, lit, value):
self.l = lit
self.value = value
def __str__(self):
return "SET({}, {})".format(self.l, self.value)
class AssignValueEffect(object):
"""
Sets equality constraint
"""
def __init__(self, atom, value):
self.atom = atom
self.value = value
def __str__(self):
return "ASSIGN({}, {})".format(self.atom, self.value)
class UniversalEffect(object):
"""
Forall effect
"""
def __init__(self, variable, effect):
self.var = variable
self.eff = effect
def __str__(self):
return "FORALL({}, {})".format(self.var, self.effect)
class ConditionalEffect(object):
"""
If Then Else effect
"""
def __init__(self, cond, then_eff, else_eff):
self.condition = cond
self.then_eff = then_eff
self.else_eff = else_eff
def __str__(self):
return "IF ({}) \nTHEN {}\n ELSE {}".format(self.condition, self.then_eff, self.else_eff)
class TimedEffect(object):
"""
(t, eff) time-delayed effect
"""
def __init__(self, delay, eff):
self.delay = delay
self.eff = eff
def __str__(self):
return "AFTER {} APPLY {}".format(self.delay, self.eff)
class UnionExpression(object):
"""
A union set expression
"""
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def is_literal(l):
if not isinstance(l, Atom):
if isinstance(l, CompoundFormula):
return l.connective == Connective.Not and isinstance(l.subformulas[0], Atom)
return False
return True
class Action:
"""
An action with resources is made of:
- precondition: a propositional formula over some first-order language
- a set of resource requirements:
      - [locks] (ts, td, r) subseteq Q x Q x R, where Q is the rationals and R is a set of
        terms mapping to the naturals
      - [levels] (ts, td, r, n) subseteq Q x Q x R x N, where Q is the rationals and R is a
        term mapping to the naturals, required to have a specific value
such that td >= 0
- an effect: a set of pairs (t,l) where t in Q, t >=0, l is a literal
"""
def __init__(self, **kwargs):
self.name = kwargs['name']
self.parameters = kwargs['parameters']
self.max_eff_time = 0.0
self.effect_times = {}
# precondition
prec = kwargs['precondition']
if not isinstance(prec, CompoundFormula) \
and not isinstance(prec, Atom)\
and not isinstance(prec, Tautology):
raise NDLSyntaxError(
"NDL Syntactic Error: precondition of action must be a compound formula, atom or tautology (given: {})".format(prec))
self.precondition = prec
# resource requirements
self.locks = []
self.levels = []
for req in kwargs['requirements']:
if isinstance(req.eff, ResourceLock):
self.locks += [req]
self.max_eff_time = max(self.max_eff_time, req.eff.td)
elif isinstance(req.eff, ResourceLevel):
self.levels += [req]
self.max_eff_time = max(self.max_eff_time, req.eff.td)
else:
raise NDLSyntaxError("NDL syntax error: '{}' is not a resource lock or level request".format(req))
# effects
self.untimed_effects = []
self.timed_effects = []
for eff in kwargs['timed_effects']:
if not isinstance(eff, TimedEffect):
raise NDLSyntaxError("NDL Syntax error: eff '{}' must be timed".format(eff))
self.timed_effects += [eff]
self.max_eff_time = max(self.max_eff_time, eff.delay)
wrapped_effect = eff.eff
if isinstance(wrapped_effect, AssignValueEffect):
self.effect_times[symref(wrapped_effect.atom == eff.eff.value)] = eff.delay
elif isinstance(wrapped_effect, SetLiteralEffect):
self.effect_times[(symref(wrapped_effect.l), wrapped_effect.value)] = eff.delay
else:
raise NotImplementedError("Effects of type {} cannot be handled yet".format(type(wrapped_effect)))
for l in kwargs['untimed_effects']:
self.untimed_effects += [(0, l)]
def get_effect_time(self, l):
return self.effect_times[l]
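# A minimal construction sketch (hedged: `door_open` stands for a tarski Atom built
# elsewhere against some FirstOrderLanguage; only classes defined above are used):
#
#   act = Action(
#       name="open_door",
#       parameters=[],
#       precondition=Tautology(),
#       requirements=[],
#       timed_effects=[TimedEffect(1.0, SetLiteralEffect(door_open, True))],
#       untimed_effects=[],
#   )
#   act.max_eff_time  # -> 1.0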
# class Instance:
#
# def __init__(self, **kwargs):
# self.language = kwargs['L']
# self.X = []
# for x in kwargs['X']:
# if not isinstance(x, Atom):
# raise NDLSyntaxError("NDL Syntax Error: State variables must be boolean terms, found: {}".format(x))
# self.X += [x]
# init = kwargs['I']
# if not isinstance(init, Model):
# raise UnsupportedFeature("NDL Unsupported feature: initial state must be instance of tarski.Model")
# if init.evaluator is None:
# raise SemanticError("NDL Semantic Error: initial state evaluator was not specified")
# self.I = init
# goal = kwargs['G']
# if not isinstance(goal, CompoundFormula) and not isinstance(goal, Atom):
# raise NDLSyntaxError("NDL Syntax Error: Goal needs to be a compound formula or an atom")
# self.G = goal
# self.A = []
# self.R = set()
# for act in kwargs['A']:
# if isinstance(act, Action):
# self.A += [act]
# # collect resources
# for lock in act.locks:
# self.R.add(symref(lock.r))
# for level in act.levels:
# self.R.add(symref(level.r))
|
import logging
from mayan.apps.documents.models.document_models import Document
from mayan.apps.documents.permissions import (
permission_document_create, permission_document_new_version
)
from mayan.apps.documents.tests.base import DocumentTestMixin
from mayan.apps.documents.tests.literals import TEST_SMALL_DOCUMENT_PATH
from mayan.apps.sources.tests.mixins import (
DocumentUploadWizardViewTestMixin, DocumentVersionUploadViewTestMixin,
SourceTestMixin
)
from mayan.apps.testing.tests.base import GenericViewTestCase
from ..classes import QuotaBackend
from ..exceptions import QuotaExceeded
from ..quota_backends import DocumentCountQuota, DocumentSizeQuota
class QuotaHooksTestCase(
DocumentTestMixin, DocumentUploadWizardViewTestMixin,
DocumentVersionUploadViewTestMixin, SourceTestMixin, GenericViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super(QuotaHooksTestCase, self).setUp()
# Increase the initial usage count to 1 by uploading a document
# as the test case user.
self._upload_test_document(_user=self._test_case_user)
self.test_case_silenced_logger_new_level = logging.FATAL + 10
self._silence_logger(name='mayan.apps.sources.views')
self._silence_logger(name='mayan.apps.logging.middleware.error_logging')
def tearDown(self):
QuotaBackend.connect_signals()
super(QuotaHooksTestCase, self).tearDown()
def test_document_quantity_quota_and_source_upload_wizard_view_with_permission(self):
self.test_quota_backend = DocumentCountQuota
self.test_quota = DocumentCountQuota.create(
documents_limit=1,
document_type_all=True,
document_type_ids=(),
group_ids=(),
user_all=True,
user_ids=(),
)
self.test_quota_backend.signal.disconnect(
dispatch_uid='quotas_handler_process_signal',
sender=self.test_quota_backend.sender
)
self.grant_permission(permission=permission_document_create)
document_count = Document.objects.count()
with self.assertRaises(expected_exception=QuotaExceeded):
self._request_upload_wizard_view()
self.assertEqual(Document.objects.count(), document_count)
def test_document_size_quota_and_source_upload_wizard_view_with_permission(self):
self.test_quota_backend = DocumentSizeQuota
self.test_quota = DocumentSizeQuota.create(
document_size_limit=0.01,
document_type_all=True,
document_type_ids=(),
group_ids=(),
user_all=True,
user_ids=(),
)
self.test_quota_backend.signal.disconnect(
dispatch_uid='quotas_handler_process_signal',
sender=self.test_quota_backend.sender
)
self.grant_permission(permission=permission_document_create)
document_count = Document.objects.count()
with self.assertRaises(expected_exception=QuotaExceeded):
self._request_upload_wizard_view()
self.assertEqual(Document.objects.count(), document_count)
def test_document_size_quota_and_document_version_upload_with_access(self):
self.test_quota_backend = DocumentSizeQuota
self.test_quota = DocumentSizeQuota.create(
document_size_limit=0.01,
document_type_all=True,
document_type_ids=(),
group_ids=(),
user_all=True,
user_ids=(),
)
self.test_quota_backend.signal.disconnect(
dispatch_uid='quotas_handler_process_signal',
sender=self.test_quota_backend.sender
)
self.grant_access(
obj=self.test_document,
permission=permission_document_new_version
)
version_count = self.test_document.versions.count()
with self.assertRaises(expected_exception=QuotaExceeded):
with open(file=TEST_SMALL_DOCUMENT_PATH, mode='rb') as file_object:
self._request_document_version_upload_view(
source_file=file_object
)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.versions.count(), version_count
)
|
from __future__ import absolute_import
from ...ags._geoprocessing import *
from ...common.geometry import Polygon, Polyline, Point, SpatialReference, Envelope
from ..._abstract import abstract
########################################################################
class hydrology(abstract.BaseAGOLClass):
"""
The data being operated on are maintained by Esri and made available to
you through these tasks. A primary benefit of using these data sources
is that a lot of the hard work has already been done, freeing you up to
just work on performing analysis instead of having to worry about
compiling, processing and storing very large datasets of continental
and global scales on your local machine or network.
Find out more here:
https://developers.arcgis.com/rest/elevation/api-reference/source-data-for-hydrology-analysis-tasks.htm
Inputs:
securityHandler - arcgis online security handler
        url - organization url
ex: http://www.arcgis.com
proxy_url - IP/address of proxy
        proxy_port - port # the proxy is on.
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_service_url = None
_gpService = None
#----------------------------------------------------------------------
def __init__(self,
securityHandler,
url=None,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url is None:
self._url = "https://www.arcgis.com/sharing/rest"
else:
if url.find("/sharing/rest") == -1:
url = url + "/sharing/rest"
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self.__init_url()
#----------------------------------------------------------------------
def __init_url(self):
"""loads the information into the class"""
portals_self_url = "{}/portals/self".format(self._url)
params = {
"f" :"json"
}
if not self._securityHandler is None:
params['token'] = self._securityHandler.token
res = self._get(url=portals_self_url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if "helperServices" in res:
helper_services = res.get("helperServices")
if "hydrology" in helper_services:
analysis_service = helper_services.get("elevation")
if "url" in analysis_service:
self._analysis_url = analysis_service.get("url")
self._gpService = GPService(url=self._analysis_url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
#----------------------------------------------------------------------
@property
def tasks(self):
"""returns a list of GPTask objects for GPService"""
if self._gpService is None:
self.__init_url()
return self._gpService.tasks
#----------------------------------------------------------------------
@property
def gpService(self):
"""returns the geoprocessing service object"""
if self._gpService is None:
self.__init_url()
return self._gpService
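#----------------------------------------------------------------------
# A minimal usage sketch (hedged: `agol_sh` stands for an ArcGIS Online security
# handler created elsewhere with this package; the available tasks depend on the
# portal's configured hydrology helper service):
#
#   hydro = hydrology(securityHandler=agol_sh, url="http://www.arcgis.com")
#   for task in hydro.tasks:
#       print(task)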
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import DateParser
'''
DAY OF WEEK TESTS
'''
def test_day_of_week_domingo():
date = DateParser.DateParser()
date.convert("22/02/2015")
assert date.dayOfWeek() == "Domingo"
def test_day_of_week_terca():
date = DateParser.DateParser()
date.convert("25/11/2014")
assert date.dayOfWeek() == "Terça"
def test_day_of_week_quinta():
date = DateParser.DateParser()
date.convert("18/02/2016")
assert date.dayOfWeek() == "Quinta"
def test_day_of_week_sexta():
date = DateParser.DateParser()
date.convert("09/01/2015")
assert date.dayOfWeek() == "Sexta"
'''
MONTH NAME TESTS
'''
def test_month_name_fevereiro():
date = DateParser.DateParser()
date.convert("22/02/2015")
assert date.monthName() == "Fevereiro"
def test_month_name_outubro():
date = DateParser.DateParser()
date.convert("28/10/1987")
assert date.monthName() == "Outubro"
def test_month_name_maio():
date = DateParser.DateParser()
date.convert("26/05/1985")
assert date.monthName() == "Maio"
'''
SEASON TESTS
'''
def test_seasson_easy_verao():
date = DateParser.DateParser()
date.convert("26/01/2015")
assert date.season() == "Verão"
def test_seasson_easy_outono():
date = DateParser.DateParser()
date.convert("26/04/2015")
assert date.season() == "Outono"
def test_seasson_easy_inverno():
date = DateParser.DateParser()
date.convert("26/08/2015")
assert date.season() == "Inverno"
def test_seasson_easy_primavera():
date = DateParser.DateParser()
date.convert("26/11/2015")
assert date.season() == "Primavera"
def test_seasson_transition_verao_outono():
date = DateParser.DateParser()
date.convert("21/03/2015")
assert date.season() == "Verão"
date.convert("22/03/2015")
assert date.season() == "Outono"
def test_seasson_transition_outono_inverno():
date = DateParser.DateParser()
date.convert("21/06/2015")
assert date.season() == "Outono"
date.convert("22/06/2015")
assert date.season() == "Inverno"
def test_seasson_transition_inverno_primavera():
date = DateParser.DateParser()
date.convert("23/09/2015")
assert date.season() == "Inverno"
date.convert("24/09/2015")
assert date.season() == "Primavera"
def test_seasson_transition_primavera_verao():
date = DateParser.DateParser()
date.convert("21/12/2015")
assert date.season() == "Primavera"
date.convert("22/12/2015")
assert date.season() == "Verão"
'''
DAY STATUS TESTS (HOLIDAY, PRE-HOLIDAY, POST-HOLIDAY, NORMAL)
'''
def test_holiday_natal():
date = DateParser.DateParser()
date.convert("25/12/2015")
assert date.dateStatus() == "Feriado"
def test_holiday_pre_feriado_natal():
date = DateParser.DateParser()
date.convert("16/12/2015")
assert date.dateStatus() == "Pré-Feriado"
def test_holiday_pos_feriado_natal():
date = DateParser.DateParser()
date.convert("28/12/2015")
assert date.dateStatus() == "Pós-Feriado"
def test_holiday_preferiado():
date = DateParser.DateParser()
date.convert("02/03/2015")
assert date.dateStatus() == "Pré-Feriado"
def test_holiday_posferiado():
date = DateParser.DateParser()
date.convert("08/03/2015")
assert date.dateStatus() == "Pós-Feriado"
def test_holiday_normal():
date = DateParser.DateParser()
date.convert("11/03/2015")
assert date.dateStatus() == "Normal"
'''
BUSINESS DAY OF THE MONTH TESTS
'''
def test_month_business_day_marco():
date = DateParser.DateParser()
date.convert("11/03/2014")
assert date.monthBusinessDay() == 6
def test_month_business_day_abril():
date = DateParser.DateParser()
date.convert("22/04/2014")
assert date.monthBusinessDay() == 14
def test_month_business_day_sabado():
date = DateParser.DateParser()
date.convert("29/11/2014")
assert date.monthBusinessDay() == -1
def test_month_business_day_feriado():
date = DateParser.DateParser()
date.convert("25/12/2014")
assert date.monthBusinessDay() == 0
|
"""project bootstrap for various frameworks and languages
"""
__author__ = "Gaurav Verma"
__email__ = "diszgaurav@gmail.com"
#----------------------------------------------------------------------
import os
import logging
import shutil
import re
import fileinput
import time
#----------------------------------------------------------------------
class ProjectsCollection(object):
"""Projects Collection Class
"""
def __init__(self):
self.__projecture_dir = os.path.abspath(os.path.dirname(__file__))
self.__projects_dir = os.path.join(self.__projecture_dir, 'projects')
# build supported languages
self.__supported_projects = []
for prj in os.listdir(self.__projects_dir):
# TODO: add dis-qualifiers
self.__supported_projects.append(prj)
def create_project(self, project,
project_type='python',
author_name='author_name',
author_email='author_email',
about='short description of project',
force=False):
"""create bootstrap project structure for specified language
:returns: execution status
:rtype: int
"""
if project_type not in self.__supported_projects:
msg = 'project for "{}" not supported!'.format(project_type)
logging.error(msg)
print msg
return 1
project_dst, project_name = self.__get_project_path_name(project)
project_src = os.path.join(self.__projects_dir, project_type)
project_src = os.path.join(project_src, 'myproject')
if os.path.exists(project_dst):
if not force:
msg = 'Project "{}" already exists. Use -f or --force to overwrite it'.format(project)
logging.info(msg)
print msg
return 0
else:
shutil.rmtree(project_dst)
shutil.copytree(project_src, project_dst, symlinks=True)
logging.debug('renaming dirs/files')
# need to walk twice!
for i in range(2):
for root, dirs, files in os.walk(project_dst):
for f in dirs if i else files:
f_old = os.path.join(root, f)
# file content
if not i:
for line in fileinput.input(files=f_old, inplace=True):
line = re.sub('myproject:author_name', author_name.title(), line)
line = re.sub('myproject:author_email', author_email, line)
line = re.sub('myproject:year', time.strftime('%Y'), line)
line = re.sub('myproject:about', about, line)
line = re.sub('myproject', project_name, line)
print line.rstrip()
# file names
f_new = os.path.join(root, re.sub('myproject', project_name, f))
os.rename(f_old, f_new)
exit_msg = '"{}" project created for {}'.format(project_name, project_type)
logging.info(exit_msg)
print exit_msg
return 0
def list_projects(self):
"""returns all supported projects as a list
:returns: supported projects
:rtype: list
"""
return self.__supported_projects
def __get_project_path_name(self, project):
"""retrieve project's absolute path and just name
:param project: project name with absolute/relative path
:returns: project path and name
:rtype: tuple
"""
project_name = os.path.split(project)[-1]
project_path = os.path.abspath(project)
return (project_path, project_name)
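# A minimal usage sketch (project name and author details are illustrative):
#
#   collection = ProjectsCollection()
#   print collection.list_projects()        # e.g. ['python', ...]
#   collection.create_project('mytool',
#                             project_type='python',
#                             author_name='jane doe',
#                             author_email='jane@example.com',
#                             about='demo project')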
|
from functools import lru_cache, partial
from pathlib import Path
from typing import Tuple, Union
from matplotlib.axes._subplots import Axes
import pandas as pd
import pyproj
from shapely.geometry import Point, base
from shapely.ops import transform
class DataFrameMixin(object):
"""DataFrameMixin aggregates a pandas DataFrame and provides the same
representation methods.
"""
def __init__(self, data: pd.DataFrame) -> None:
self.data: pd.DataFrame = data
@classmethod
def from_file(cls, filename: Union[Path, str]):
path = Path(filename)
if path.suffixes in [[".pkl"], [".pkl", ".gz"]]:
return cls(pd.read_pickle(path))
if path.suffixes == [".csv"]:
return cls(pd.read_csv(path))
if path.suffixes == [".h5"]:
return cls(pd.read_hdf(path))
return None
# --- Special methods ---
def _repr_html_(self):
return self.data._repr_html_()
def __repr__(self):
return self.data.__repr__()
def __len__(self) -> int:
return self.data.shape[0]
# --- Redirected to pandas.DataFrame ---
def to_pickle(self, filename: Union[str, Path]) -> None:
self.data.to_pickle(filename)
def to_csv(self, filename: Union[str, Path]) -> None:
self.data.to_csv(filename)
def to_hdf(self, filename: Union[str, Path]) -> None:
self.data.to_hdf(filename)
def to_json(self, filename: Union[str, Path]) -> None:
self.data.to_json(filename)
def to_excel(self, filename: Union[str, Path]) -> None:
self.data.to_excel(filename)
def query(self, query: str):
return self.__class__(self.data.query(query))
def groupby(self, *args, **kwargs):
return self.data.groupby(*args, **kwargs)
def assign(self, *args, **kwargs):
return self.__class__(self.data.assign(*args, **kwargs))
class ShapelyMixin(object):
"""ShapelyMixin expects a shape attribute as a Geometry and provides methods
consistent with GIS geometries.
However no plot method is provided at this level because it depends on the
nature of the shape.
"""
shape: base.BaseGeometry
# --- Properties ---
@property
def bounds(self) -> Tuple[float, float, float, float]:
"""Returns the bounds of the shape.
Bounds are given in the following order in the origin crs:
west, south, east, north
"""
return self.shape.bounds
@property
def extent(self) -> Tuple[float, float, float, float]:
"""Returns the extent of the shape.
Extent is given in the following order in the origin crs:
west, east, south, north
This method is convenient for the ax.set_extent method
"""
west, south, east, north = self.bounds
return west, east, south, north
@property
def centroid(self) -> Point:
"""Returns the centroid of the shape."""
return self.shape.centroid
@property
def area(self) -> float:
"""Returns the area of the shape, in square meters."""
return self.project_shape().area
# --- Representations ---
def _repr_svg_(self):
project = self.project_shape()
if project is not None:
return project._repr_svg_()
def _repr_html_(self) -> str:
no_wrap_div = '<div style="white-space: nowrap">{}</div>'
return no_wrap_div.format(self._repr_svg_())
@lru_cache()
def project_shape(
self, projection: pyproj.Proj = None
) -> base.BaseGeometry:
"""Projection for a decent representation of the structure.
        By default, an equal-area ("aea", Albers) projection is applied. Equal-area
        projections locally preserve areas, which is convenient for the area attribute.
"""
if self.shape is None:
return None
if projection is None:
bounds = self.bounds
projection = pyproj.Proj(
proj="aea", # equivalent projection
lat1=bounds[1],
lat2=bounds[3],
lon1=bounds[0],
lon2=bounds[2],
)
return transform(
partial(
pyproj.transform, pyproj.Proj(init="EPSG:4326"), projection
),
self.shape,
)
class GeographyMixin(object):
"""Adds Euclidean coordinates to a latitude/longitude DataFrame."""
data: pd.DataFrame
def compute_xy(self, projection: pyproj.Proj = None):
"""Computes x and y columns from latitudes and longitudes.
The source projection is WGS84 (EPSG 4326).
The default destination projection is a Lambert Conformal Conical
projection centered on the data inside the dataframe.
For consistency reasons with pandas DataFrame, a new Traffic structure
is returned.
"""
if projection is None:
projection = pyproj.Proj(
proj="lcc",
lat_1=self.data.latitude.min(),
lat_2=self.data.latitude.max(),
lat_0=self.data.latitude.mean(),
lon_0=self.data.longitude.mean(),
)
x, y = pyproj.transform(
pyproj.Proj(init="EPSG:4326"),
projection,
self.data.longitude.values,
self.data.latitude.values,
)
return self.__class__(self.data.assign(x=x, y=y))
class PointMixin(object):
latitude: float
longitude: float
def plot(
self, ax: Axes, text_kw=None, shift=dict(units="dots", x=15), **kwargs
):
if text_kw is None:
text_kw = {}
if "projection" in ax.__dict__ and "transform" not in kwargs:
from cartopy.crs import PlateCarree
from matplotlib.transforms import offset_copy
kwargs["transform"] = PlateCarree()
geodetic_transform = PlateCarree()._as_mpl_transform(ax)
text_kw["transform"] = offset_copy(geodetic_transform, **shift)
if "color" not in kwargs:
kwargs["color"] = "black"
ax.scatter(self.longitude, self.latitude, **kwargs)
ax.text(self.longitude, self.latitude, **text_kw)
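# A minimal usage sketch (hypothetical `Flights` class; assumes "flights.csv" has
# latitude and longitude columns):
#
#   class Flights(DataFrameMixin, GeographyMixin):
#       pass
#
#   flights = Flights.from_file("flights.csv")
#   projected = flights.compute_xy()   # same class, with projected x/y columns added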
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class DeleteVpcRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'DeleteVpc','Vpc')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
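# A minimal usage sketch (credentials, region and VPC id are placeholders):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DeleteVpcRequest()
#   request.set_VpcId('vpc-xxxxxxxxxxxxxxxxx')
#   response = client.do_action_with_exception(request)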
|
import functools
from contextlib import contextmanager
from inspect import isfunction
from typing import Callable, Optional, Union
from zhtools import ignore_exception
from zhtools.exceptions import ModuleRequired
_redis_pool = None
_redis_client = None
def init(redis_client=None, redis_pool=None):
global _redis_pool
global _redis_client
_redis_client = redis_client
_redis_pool = redis_pool
def get_client():
if _redis_client:
return _redis_client
if _redis_pool:
try:
import redis
            return redis.Redis(connection_pool=_redis_pool)
except ImportError:
raise ModuleRequired('redis')
@contextmanager
def concurrent_lock(key: str,
ttl: int = 3,
blocking_timeout: Optional[int] = None,
redis_cli=None):
"""
>>> with concurrent_lock(key='lock') as l:
>>> if not l:
>>> raise Exception()
>>> ...
"""
redis_cli = redis_cli or get_client()
lock = redis_cli.lock(key,
timeout=ttl,
blocking_timeout=blocking_timeout)
locked = lock.acquire()
try:
yield locked
finally:
if locked:
with ignore_exception():
lock.release()
class GetLockError(Exception):
pass
def concurrent_limit(key: Union[str, Callable],
ttl: int = 3,
blocking_timeout: Optional[int] = None):
"""
Use directly
>>> @concurrent_limit
>>> def foo(k):
>>> pass
Use with parameters
>>> @concurrent_limit(key='lock')
>>> def foo():
>>> pass
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
nonlocal key
if isfunction(key):
key = key.__name__
with concurrent_lock(key, ttl, blocking_timeout) as lock:
if not lock:
raise GetLockError()
return func(*args, **kwargs)
return wrapped
if isfunction(key):
return decorator(key)
return decorator
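# A minimal wiring sketch (assumes the `redis` package and a reachable server;
# host/port and the lock key are illustrative):
#
#   import redis
#   init(redis_client=redis.Redis(host="localhost", port=6379))
#
#   @concurrent_limit(key="sync-report", ttl=10)
#   def sync_report():
#       ...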
|
class Solution(object):
def XXX(self, n):
"""
:type n: int
:rtype: List[str]
"""
        # The number of '(' placed so far must always be >= the number of ')'.
        # Think of it as a binary tree: one dict stores the partial strings at each level,
        # another dict stores the count of '(' used by each corresponding node.
        # Invalid branches are pruned while the tree is expanded.
r_map = {1:['(']}
        count_map = {1:[1]}  # dict of '(' counts per node
layers = 2*n
for i in range(2,layers+1):
count = 0
if i not in r_map:
r_map[i] = []
count_map[i] = []
for item in r_map[i-1]:
                left_count = count_map[i-1][count]  # number of '(' in this prefix
                right_count = i - left_count - 1  # number of ')' in this prefix
if left_count != n :
if left_count > right_count:
r_map[i].append(item+')')
count_map[i].append(left_count)
r_map[i].append(item+'(')
left_count += 1
count_map[i].append(left_count)
else:
r_map[i].append(item+'(')
left_count += 1
count_map[i].append(left_count)
else:
r_map[i].append(item+')')
count_map[i].append(left_count)
count = count + 1
return r_map[2*n]
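# Quick, illustrative sanity check of the layer-by-layer expansion above;
# for n=2 the method returns the two valid sequences, in expansion order: ['()()', '(())'].
if __name__ == "__main__":
    print(Solution().XXX(2))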
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from akg.utils import kernel_exec as utils
from tests.common.test_op import clip_by_value
from tests.common.tensorio import compare_tensor
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def clip_by_value_execute(shapes, dtype, attrs):
exp_output, inputs, args = gen_data(dtype, shapes)
mod = clip_by_value_compile(shapes, dtype, attrs)
# result_tvm
acu_output = utils.mod_launch(mod, args, expect=exp_output)
# compare result
rtol, atol = get_rtol_atol("clip_by_value", dtype)
TestCase_Result = compare_tensor(acu_output, exp_output, rtol=rtol, atol=atol, equal_nan=True)
return inputs, acu_output, exp_output, TestCase_Result
def gen_data(dtype, shapes):
# Result_Numpy
data = random_gaussian(shapes[0], miu=0, sigma=1).astype(dtype)
clip_min_value = random_gaussian(shapes[1], miu=-1, sigma=0.1).astype(dtype)
clip_max_value = clip_min_value + 2
res_max = np.maximum(data, clip_min_value)
exp_output = np.minimum(res_max, clip_max_value)
# inputs and output to hold the data
output = np.full(shapes[0], np.nan, dtype)
inputs = [data, clip_min_value, clip_max_value]
args = [data, clip_min_value, clip_max_value, output]
return exp_output, inputs, args
def clip_by_value_compile(shapes, dtype, attrs, kernel_name='clip_by_value', runing=False):
return utils.op_build_test(clip_by_value.clip_by_value, [shapes[0], shapes[1], shapes[2]], [dtype, dtype, dtype], kernel_name=kernel_name, attrs=attrs, tuning=runing)
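# Worked numpy example of the clipping semantics checked above (values are illustrative):
#
#   x  = np.array([-3.0, 0.5, 4.0])
#   lo = np.array([-1.0, -1.0, -1.0])
#   hi = lo + 2                          # mirrors gen_data: clip_max = clip_min + 2
#   np.minimum(np.maximum(x, lo), hi)    # -> array([-1. ,  0.5,  1. ])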
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from Text_A_yolo_model_file.common_yolo.text_proposal_graph_builder import TextProposalGraphBuilder
class TextProposalConnector:
def __init__(self, MAX_HORIZONTAL_GAP=30, MIN_V_OVERLAPS=0.6, MIN_SIZE_SIM=0.6):
self.graph_builder = TextProposalGraphBuilder(MAX_HORIZONTAL_GAP, MIN_V_OVERLAPS, MIN_SIZE_SIM)
def group_text_proposals(self, text_proposals, scores, im_size):
graph = self.graph_builder.build_graph(text_proposals, scores, im_size)
return graph.sub_graphs_connected()
def fit_y(self, X, Y, x1, x2):
        assert len(X) != 0
# if X only include one point, the function will get line y=Y[0]
if np.sum(X == X[0]) == len(X):
return Y[0], Y[0]
p = np.poly1d(np.polyfit(X, Y, 1))
return p(x1), p(x2)
def get_text_lines(self, text_proposals, scores, im_size):
"""
        text_proposals: candidate text boxes, each as [x0, y0, x1, y1]
"""
# tp=text proposal
tp_groups = self.group_text_proposals(text_proposals, scores, im_size) ##find the text line
text_lines = np.zeros((len(tp_groups), 8), np.float32)
newscores = np.zeros((len(tp_groups),), np.float32)
for index, tp_indices in enumerate(tp_groups):
text_line_boxes = text_proposals[list(tp_indices)]
# num = np.size(text_line_boxes)##find
X = (text_line_boxes[:, 0] + text_line_boxes[:, 2]) / 2
Y = (text_line_boxes[:, 1] + text_line_boxes[:, 3]) / 2
z1 = np.polyfit(X, Y, 1)
# p1 = np.poly1d(z1)
x0 = np.min(text_line_boxes[:, 0])
x1 = np.max(text_line_boxes[:, 2])
offset = (text_line_boxes[0, 2] - text_line_boxes[0, 0]) * 0.5
lt_y, rt_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0 + offset, x1 - offset)
lb_y, rb_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0 + offset, x1 - offset)
# the score of a text line is the average score of the scores
# of all text proposals contained in the text line
score = scores[list(tp_indices)].sum() / float(len(tp_indices))
text_lines[index, 0] = x0
text_lines[index, 1] = min(lt_y, rt_y)
text_lines[index, 2] = x1
text_lines[index, 3] = max(lb_y, rb_y)
text_lines[index, 4] = score
text_lines[index, 5] = z1[0]
text_lines[index, 6] = z1[1]
height = np.mean((text_line_boxes[:, 3] - text_line_boxes[:, 1]))
text_lines[index, 7] = height + 2.5
newscores[index] = score
return text_lines, newscores
|
import logging
from pathlib import Path
from typing import Dict, List
from overhave.entities.feature.abstract import IFeatureExtractor
from overhave.entities.feature.errors import FeatureTypeExtractionError, ScenariosTestFileNotFound
from overhave.entities.feature.types import FeatureTypeName
from overhave.entities.settings import OverhaveFileSettings
logger = logging.getLogger(__name__)
class FeatureExtractor(IFeatureExtractor):
""" Class for specified project's feature types resolution. """
def __init__(self, file_settings: OverhaveFileSettings):
self._file_settings = file_settings
self._feature_types: List[FeatureTypeName] = []
self._feature_type_to_dir_mapping: Dict[FeatureTypeName, Path] = {}
try:
self._extract_project_data()
self._check_pytest_bdd_scenarios_test_files()
except FeatureTypeExtractionError:
logger.exception("Extraction error while trying to collect features!")
def _extract_project_data(self) -> None:
feature_type_dirs = []
try:
feature_type_dirs = [
d
for d in self._file_settings.features_dir.iterdir()
if all(
(
d.is_dir(),
d != self._file_settings.tmp_dir,
not d.name.startswith("."),
not d.name.startswith("_"),
)
)
]
except FileNotFoundError:
pass
if not feature_type_dirs:
raise FeatureTypeExtractionError(
f"Could not find any subdirectory in specified features directory '{self._file_settings.features_dir}'!"
)
self._feature_types = [FeatureTypeName(t.name) for t in feature_type_dirs]
logger.info("Registered feature types: %s", self._feature_types)
self._feature_type_to_dir_mapping = {FeatureTypeName(t.name): t for t in feature_type_dirs}
def _check_pytest_bdd_scenarios_test_files(self) -> None:
for feature_type in self._feature_types:
scenarios_file = self._file_settings.fixtures_dir / self._file_settings.fixtures_file_template_mask.format(
feature_type=feature_type
)
if scenarios_file.exists():
continue
raise ScenariosTestFileNotFound(
"Could not find pytest-bdd test file with scenarios definition! "
f"Maybe you don't created '{scenarios_file.name}' in '{scenarios_file.parent}' directory?"
)
@property
def feature_types(self) -> List[FeatureTypeName]:
return self._feature_types
@property
def feature_type_to_dir_mapping(self) -> Dict[FeatureTypeName, Path]:
return self._feature_type_to_dir_mapping
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test that a storage's values persist across open and close."""
from ZODB.utils import load_current
class PersistentStorage(object):
def checkUpdatesPersist(self):
oids = []
def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):
oid = new_oid()
l.append(oid)
return oid
self._storage.new_oid = new_oid_wrapper
self._dostore()
oid = self._storage.new_oid()
revid = self._dostore(oid)
oid = self._storage.new_oid()
revid = self._dostore(oid, data=1)
revid = self._dostore(oid, revid, data=2)
self._dostore(oid, revid, data=3)
# keep copies of all the objects
objects = []
for oid in oids:
p, s = load_current(self._storage, oid)
objects.append((oid, '', p, s))
self._storage.close()
self.open()
# keep copies of all the objects
for oid, ver, p, s in objects:
_p, _s = load_current(self._storage, oid)
self.assertEqual(p, _p)
self.assertEqual(s, _s)
|
# Copyright 2021-2022 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""
Pytest shell utilities plugin.
"""
import pytest
from pytestshellutils.shell import Subprocess
@pytest.fixture # type: ignore[misc]
def shell() -> Subprocess:
"""
Shell fixture.
"""
return Subprocess()
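# A minimal usage sketch in a test module (hedged: the command and assertion are
# illustrative; `shell.run` returns a process result exposing a returncode):
#
#   def test_echo(shell: Subprocess) -> None:
#       result = shell.run("echo", "hello")
#       assert result.returncode == 0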
|
import os
from pathlib import Path
import time
from anuvaad_auditor.errorhandler import post_error
from anuvaad_auditor.errorhandler import post_error_wf
import config
class FileOperation(object):
def __init__(self):
self.download_folder = None
    # create the download directory if it does not already exist
def file_download(self, downloading_folder):
self.download_folder = downloading_folder
download_dir = Path(os.path.join(os.curdir,self.download_folder))
if download_dir.exists() is False:
os.makedirs(download_dir)
return download_dir
# checking directory exists or not
def check_path_exists(self, dir):
if dir is not None and os.path.exists(dir) is True:
return True
else:
return False
# checking file extension of received file type
def check_file_extension(self, file_type):
allowed_extensions = ['txt','csv']
if file_type in allowed_extensions:
return True
else:
return False
# generating input filepath for input filename
def input_path(self, input_filename):
input_filepath = os.path.join('upload', input_filename)
return input_filepath
# generating output filepath for output filename
def output_path(self,i, DOWNLOAD_FOLDER):
output_filename = '%d-'%i + str(time.time()).replace('.', '') + '.txt'
output_filepath = os.path.join(DOWNLOAD_FOLDER, output_filename)
return output_filepath , output_filename
# reading content of input text file
def read_file(self, input_filename):
input_filepath = self.input_path(input_filename)
with open(input_filepath, 'r', encoding='utf-16') as f:
input_file_data = f.readlines()
return input_file_data
# extracting data from received json input
def json_input_format(self, json_data):
input_files = json_data["input"]['files']
workflow_id = json_data['workflowCode']
jobid = json_data['jobID']
tool_name = json_data['tool']
step_order = json_data['stepOrder']
return input_files, workflow_id, jobid, tool_name, step_order
# extracting input file features
def accessing_files(self,files):
filepath = files['path']
file_type = files['type']
locale = files['locale']
return filepath, file_type, locale
# output format for individual pdf file
def one_filename_response(self, input_filename, output_filename, in_locale, in_file_type):
file_res = {
"inputFile" : input_filename,
"outputFile" : output_filename,
"outputLocale" : in_locale,
"outputType" : in_file_type
}
return file_res
# error manager integration
def error_handler(self, object_in, code, iswf):
if iswf:
object_in['status'] = "FAILED"
object_in['state'] = config.TASK_STAT
error = post_error_wf(code, object_in['message'], object_in, None)
return error
else:
message = ""
error = post_error(code, message, None)
return error
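# A minimal usage sketch (file names are illustrative; read_file expects a UTF-16
# encoded text file under the 'upload' directory):
#
#   file_ops = FileOperation()
#   download_dir = file_ops.file_download('upload')   # creates ./upload if missing
#   if file_ops.check_file_extension('txt'):
#       lines = file_ops.read_file('sample.txt')      # reads upload/sample.txt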
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.providers.microsoft.azure.hooks.azure_cosmos import AzureCosmosDBHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class AzureCosmosDocumentSensor(BaseSensorOperator):
"""
Checks for the existence of a document which
matches the given query in CosmosDB. Example:
.. code-block::
azure_cosmos_sensor = AzureCosmosDocumentSensor(
database_name="somedatabase_name",
collection_name="somecollection_name",
document_id="unique-doc-id",
azure_cosmos_conn_id="azure_cosmos_default",
task_id="azure_cosmos_sensor")
:param database_name: Target CosmosDB database_name.
:type database_name: str
:param collection_name: Target CosmosDB collection_name.
:type collection_name: str
:param document_id: The ID of the target document.
:type document_id: str
:param azure_cosmos_conn_id: Reference to the Azure CosmosDB connection.
:type azure_cosmos_conn_id: str
"""
template_fields = ('database_name', 'collection_name', 'document_id')
@apply_defaults
def __init__(
self,
*,
database_name: str,
collection_name: str,
document_id: str,
azure_cosmos_conn_id: str = "azure_cosmos_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.azure_cosmos_conn_id = azure_cosmos_conn_id
self.database_name = database_name
self.collection_name = collection_name
self.document_id = document_id
def poke(self, context: dict) -> bool:
self.log.info("*** Intering poke")
hook = AzureCosmosDBHook(self.azure_cosmos_conn_id)
return hook.get_document(self.document_id, self.database_name, self.collection_name) is not None
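# --- Usage sketch (illustrative) ---------------------------------------------
# A hedged example of wiring the sensor into a DAG. The database, collection,
# document id and connection id are placeholders; the DAG is built inside a
# function so that importing this module does not register anything.
def _example_cosmos_sensor_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="cosmos_sensor_example",
        start_date=datetime(2021, 1, 1),
        schedule_interval=None,
        catchup=False,
    ) as dag:
        AzureCosmosDocumentSensor(
            task_id="wait_for_document",
            database_name="somedatabase_name",
            collection_name="somecollection_name",
            document_id="unique-doc-id",
            azure_cosmos_conn_id="azure_cosmos_default",
        )
    return dag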
|
"""Conversations API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class ConversationsAPI(BaseCanvasAPI):
"""Conversations API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for ConversationsAPI."""
super(ConversationsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.ConversationsAPI")
def list_conversations(
self,
filter=None,
filter_mode=None,
include=None,
include_all_conversation_ids=None,
interleave_submissions=None,
scope=None,
):
"""
List conversations.
Returns the paginated list of conversations for the current user, most
recent ones first.
"""
path = {}
data = {}
params = {}
# OPTIONAL - scope
"""
When set, only return conversations of the specified type. For example,
set to "unread" to return only conversations that haven't been read.
The default behavior is to return all non-archived conversations (i.e.
read and unread).
"""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
params["scope"] = scope
# OPTIONAL - filter
"""
When set, only return conversations for the specified courses, groups
or users. The id should be prefixed with its type, e.g. "user_123" or
"course_456". Can be an array (by setting "filter[]") or single value
(by setting "filter")
"""
if filter is not None:
params["filter"] = filter
# OPTIONAL - filter_mode
"""
When filter[] contains multiple filters, combine them with this mode,
        filtering conversations that have at least all of the contexts ("and")
or at least one of the contexts ("or")
"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
params["filter_mode"] = filter_mode
# OPTIONAL - interleave_submissions
"""
(Obsolete) Submissions are no
longer linked to conversations. This parameter is ignored.
"""
if interleave_submissions is not None:
params["interleave_submissions"] = interleave_submissions
# OPTIONAL - include_all_conversation_ids
"""
Default is false. If true,
the top-level element of the response will be an object rather than
an array, and will have the keys "conversations" which will contain the
paged conversation data, and "conversation_ids" which will contain the
ids of all conversations under this scope/filter in the same order.
"""
if include_all_conversation_ids is not None:
params["include_all_conversation_ids"] = include_all_conversation_ids
# OPTIONAL - include
"""
"participant_avatars":: Optionally include an "avatar_url" key for each user participanting in the conversation
"""
if include is not None:
self._validate_enum(include, ["participant_avatars"])
params["include"] = include
self.logger.debug(
"GET /api/v1/conversations with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/conversations".format(**path),
data=data,
params=params,
all_pages=True,
)
def create_conversation(
self,
body,
recipients,
attachment_ids=None,
context_code=None,
filter=None,
filter_mode=None,
force_new=None,
group_conversation=None,
media_comment_id=None,
media_comment_type=None,
mode=None,
scope=None,
subject=None,
user_note=None,
):
"""
Create a conversation.
Create a new conversation with one or more recipients. If there is already
an existing private conversation with the given recipients, it will be
reused.
"""
path = {}
data = {}
params = {}
# REQUIRED - recipients
"""
An array of recipient ids. These may be user ids or course/group ids
prefixed with "course_" or "group_" respectively, e.g.
recipients[]=1&recipients[]=2&recipients[]=course_3. If the course/group
has over 100 enrollments, 'bulk_message' and 'group_conversation' must be
set to true.
"""
data["recipients"] = recipients
# OPTIONAL - subject
"""
The subject of the conversation. This is ignored when reusing a
conversation. Maximum length is 255 characters.
"""
if subject is not None:
data["subject"] = subject
# REQUIRED - body
"""
The message to be sent
"""
data["body"] = body
# OPTIONAL - force_new
"""
Forces a new message to be created, even if there is an existing private conversation.
"""
if force_new is not None:
data["force_new"] = force_new
# OPTIONAL - group_conversation
"""
Defaults to false. When false, individual private conversations will be
created with each recipient. If true, this will be a group conversation
(i.e. all recipients may see all messages and replies). Must be set true if
the number of recipients is over the set maximum (default is 100).
"""
if group_conversation is not None:
data["group_conversation"] = group_conversation
# OPTIONAL - attachment_ids
"""
An array of attachments ids. These must be files that have been previously
uploaded to the sender's "conversation attachments" folder.
"""
if attachment_ids is not None:
data["attachment_ids"] = attachment_ids
# OPTIONAL - media_comment_id
"""
        Media comment id of an audio or video file to be associated with this
message.
"""
if media_comment_id is not None:
data["media_comment_id"] = media_comment_id
# OPTIONAL - media_comment_type
"""
Type of the associated media file
"""
if media_comment_type is not None:
self._validate_enum(media_comment_type, ["audio", "video"])
data["media_comment_type"] = media_comment_type
# OPTIONAL - user_note
"""
Will add a faculty journal entry for each recipient as long as the user
making the api call has permission, the recipient is a student and
faculty journals are enabled in the account.
"""
if user_note is not None:
data["user_note"] = user_note
# OPTIONAL - mode
"""
Determines whether the messages will be created/sent synchronously or
asynchronously. Defaults to sync, and this option is ignored if this is a
group conversation or there is just one recipient (i.e. it must be a bulk
private message). When sent async, the response will be an empty array
(batch status can be queried via the {api:ConversationsController#batches batches API})
"""
if mode is not None:
self._validate_enum(mode, ["sync", "async"])
data["mode"] = mode
# OPTIONAL - scope
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
data["scope"] = scope
# OPTIONAL - filter
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if filter is not None:
data["filter"] = filter
# OPTIONAL - filter_mode
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
data["filter_mode"] = filter_mode
# OPTIONAL - context_code
"""
The course or group that is the context for this conversation. Same format
as courses or groups in the recipients argument.
"""
if context_code is not None:
data["context_code"] = context_code
self.logger.debug(
"POST /api/v1/conversations with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/conversations".format(**path),
data=data,
params=params,
no_data=True,
)
def get_running_batches(self):
"""
Get running batches.
Returns any currently running conversation batches for the current user.
Conversation batches are created when a bulk private message is sent
asynchronously (see the mode argument to the {api:ConversationsController#create create API action}).
"""
path = {}
data = {}
params = {}
self.logger.debug(
"GET /api/v1/conversations/batches with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/conversations/batches".format(**path),
data=data,
params=params,
no_data=True,
)
def get_single_conversation(
self,
id,
auto_mark_as_read=None,
filter=None,
filter_mode=None,
interleave_submissions=None,
scope=None,
):
"""
Get a single conversation.
Returns information for a single conversation for the current user. Response includes all
fields that are present in the list/index action as well as messages
and extended participant information.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - interleave_submissions
"""
(Obsolete) Submissions are no
longer linked to conversations. This parameter is ignored.
"""
if interleave_submissions is not None:
params["interleave_submissions"] = interleave_submissions
# OPTIONAL - scope
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
params["scope"] = scope
# OPTIONAL - filter
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if filter is not None:
params["filter"] = filter
# OPTIONAL - filter_mode
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
params["filter_mode"] = filter_mode
# OPTIONAL - auto_mark_as_read
"""
Default true. If true, unread
conversations will be automatically marked as read. This will default
to false in a future API release, so clients should explicitly send
true if that is the desired behavior.
"""
if auto_mark_as_read is not None:
params["auto_mark_as_read"] = auto_mark_as_read
self.logger.debug(
"GET /api/v1/conversations/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/conversations/{id}".format(**path),
data=data,
params=params,
no_data=True,
)
def edit_conversation(
self,
id,
conversation_starred=None,
conversation_subscribed=None,
conversation_workflow_state=None,
filter=None,
filter_mode=None,
scope=None,
):
"""
Edit a conversation.
Updates attributes for a single conversation.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - conversation[workflow_state]
"""
Change the state of this conversation
"""
if conversation_workflow_state is not None:
self._validate_enum(
conversation_workflow_state, ["read", "unread", "archived"]
)
data["conversation[workflow_state]"] = conversation_workflow_state
# OPTIONAL - conversation[subscribed]
"""
Toggle the current user's subscription to the conversation (only valid for
group conversations). If unsubscribed, the user will still have access to
the latest messages, but the conversation won't be automatically flagged
as unread, nor will it jump to the top of the inbox.
"""
if conversation_subscribed is not None:
data["conversation[subscribed]"] = conversation_subscribed
# OPTIONAL - conversation[starred]
"""
Toggle the starred state of the current user's view of the conversation.
"""
if conversation_starred is not None:
data["conversation[starred]"] = conversation_starred
# OPTIONAL - scope
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
data["scope"] = scope
# OPTIONAL - filter
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if filter is not None:
data["filter"] = filter
# OPTIONAL - filter_mode
"""
Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}
"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
data["filter_mode"] = filter_mode
self.logger.debug(
"PUT /api/v1/conversations/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/v1/conversations/{id}".format(**path),
data=data,
params=params,
no_data=True,
)
def mark_all_as_read(self):
"""
Mark all as read.
Mark all conversations as read.
"""
path = {}
data = {}
params = {}
self.logger.debug(
"POST /api/v1/conversations/mark_all_as_read with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/conversations/mark_all_as_read".format(**path),
data=data,
params=params,
no_data=True,
)
def delete_conversation(self, id):
"""
Delete a conversation.
Delete this conversation and its messages. Note that this only deletes
this user's view of the conversation.
Response includes same fields as UPDATE action
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"DELETE /api/v1/conversations/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"DELETE",
"/api/v1/conversations/{id}".format(**path),
data=data,
params=params,
no_data=True,
)
def add_recipients(self, id, recipients):
"""
Add recipients.
Add recipients to an existing group conversation. Response is similar to
        the GET/show action, except that it only includes the
latest message (e.g. "joe was added to the conversation by bob")
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# REQUIRED - recipients
"""
An array of recipient ids. These may be user ids or course/group ids
prefixed with "course_" or "group_" respectively, e.g.
recipients[]=1&recipients[]=2&recipients[]=course_3
"""
data["recipients"] = recipients
self.logger.debug(
"POST /api/v1/conversations/{id}/add_recipients with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/conversations/{id}/add_recipients".format(**path),
data=data,
params=params,
no_data=True,
)
def add_message(
self,
body,
id,
attachment_ids=None,
included_messages=None,
media_comment_id=None,
media_comment_type=None,
recipients=None,
user_note=None,
):
"""
Add a message.
Add a message to an existing conversation. Response is similar to the
        GET/show action, except that it only includes the
latest message (i.e. what we just sent)
An array of user ids. Defaults to all of the current conversation
recipients. To explicitly send a message to no other recipients,
this array should consist of the logged-in user id.
An array of message ids from this conversation to send to recipients
of the new message. Recipients who already had a copy of included
messages will not be affected.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# REQUIRED - body
"""
The message to be sent.
"""
data["body"] = body
# OPTIONAL - attachment_ids
"""
An array of attachments ids. These must be files that have been previously
uploaded to the sender's "conversation attachments" folder.
"""
if attachment_ids is not None:
data["attachment_ids"] = attachment_ids
# OPTIONAL - media_comment_id
"""
        Media comment id of an audio or video file to be associated with this
message.
"""
if media_comment_id is not None:
data["media_comment_id"] = media_comment_id
# OPTIONAL - media_comment_type
"""
Type of the associated media file.
"""
if media_comment_type is not None:
self._validate_enum(media_comment_type, ["audio", "video"])
data["media_comment_type"] = media_comment_type
# OPTIONAL - recipients
"""
no description
"""
if recipients is not None:
data["recipients"] = recipients
# OPTIONAL - included_messages
"""
no description
"""
if included_messages is not None:
data["included_messages"] = included_messages
# OPTIONAL - user_note
"""
Will add a faculty journal entry for each recipient as long as the user
making the api call has permission, the recipient is a student and
faculty journals are enabled in the account.
"""
if user_note is not None:
data["user_note"] = user_note
self.logger.debug(
"POST /api/v1/conversations/{id}/add_message with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/conversations/{id}/add_message".format(**path),
data=data,
params=params,
no_data=True,
)
def delete_message(self, id, remove):
"""
Delete a message.
Delete messages from this conversation. Note that this only affects this
user's view of the conversation. If all messages are deleted, the
conversation will be as well (equivalent to DELETE)
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# REQUIRED - remove
"""
Array of message ids to be deleted
"""
data["remove"] = remove
self.logger.debug(
"POST /api/v1/conversations/{id}/remove_messages with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/conversations/{id}/remove_messages".format(**path),
data=data,
params=params,
no_data=True,
)
def batch_update_conversations(self, conversation_ids, event):
"""
Batch update conversations.
Perform a change on a set of conversations. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation.
"""
path = {}
data = {}
params = {}
# REQUIRED - conversation_ids
"""
List of conversations to update. Limited to 500 conversations.
"""
data["conversation_ids"] = conversation_ids
# REQUIRED - event
"""
The action to take on each conversation.
"""
self._validate_enum(
event,
["mark_as_read", "mark_as_unread", "star", "unstar", "archive", "destroy"],
)
data["event"] = event
self.logger.debug(
"PUT /api/v1/conversations with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/v1/conversations".format(**path),
data=data,
params=params,
single_item=True,
)
def find_recipients(self):
"""
Find recipients.
Deprecated, see the {api:SearchController#recipients Find recipients endpoint} in the Search API
"""
path = {}
data = {}
params = {}
self.logger.debug(
"GET /api/v1/conversations/find_recipients with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/conversations/find_recipients".format(**path),
data=data,
params=params,
no_data=True,
)
def unread_count(self):
"""
Unread count.
Get the number of unread conversations for the current user
"""
path = {}
data = {}
params = {}
self.logger.debug(
"GET /api/v1/conversations/unread_count with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/conversations/unread_count".format(**path),
data=data,
params=params,
no_data=True,
)
class Conversation(BaseModel):
"""Conversation Model."""
def __init__(
self,
id=None,
subject=None,
workflow_state=None,
last_message=None,
start_at=None,
message_count=None,
subscribed=None,
private=None,
starred=None,
properties=None,
audience=None,
audience_contexts=None,
avatar_url=None,
participants=None,
visible=None,
context_name=None,
):
"""Init method for Conversation class."""
self._id = id
self._subject = subject
self._workflow_state = workflow_state
self._last_message = last_message
self._start_at = start_at
self._message_count = message_count
self._subscribed = subscribed
self._private = private
self._starred = starred
self._properties = properties
self._audience = audience
self._audience_contexts = audience_contexts
self._avatar_url = avatar_url
self._participants = participants
self._visible = visible
self._context_name = context_name
self.logger = logging.getLogger("py3canvas.Conversation")
@property
def id(self):
"""the unique identifier for the conversation."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def subject(self):
"""the subject of the conversation."""
return self._subject
@subject.setter
def subject(self, value):
"""Setter for subject property."""
self.logger.warn(
"Setting values on subject will NOT update the remote Canvas instance."
)
self._subject = value
@property
def workflow_state(self):
"""The current state of the conversation (read, unread or archived)."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn(
"Setting values on workflow_state will NOT update the remote Canvas instance."
)
self._workflow_state = value
@property
def last_message(self):
"""A <=100 character preview from the most recent message."""
return self._last_message
@last_message.setter
def last_message(self, value):
"""Setter for last_message property."""
self.logger.warn(
"Setting values on last_message will NOT update the remote Canvas instance."
)
self._last_message = value
@property
def start_at(self):
"""the date and time at which the last message was sent."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn(
"Setting values on start_at will NOT update the remote Canvas instance."
)
self._start_at = value
@property
def message_count(self):
"""the number of messages in the conversation."""
return self._message_count
@message_count.setter
def message_count(self, value):
"""Setter for message_count property."""
self.logger.warn(
"Setting values on message_count will NOT update the remote Canvas instance."
)
self._message_count = value
@property
def subscribed(self):
"""whether the current user is subscribed to the conversation."""
return self._subscribed
@subscribed.setter
def subscribed(self, value):
"""Setter for subscribed property."""
self.logger.warn(
"Setting values on subscribed will NOT update the remote Canvas instance."
)
self._subscribed = value
@property
def private(self):
"""whether the conversation is private."""
return self._private
@private.setter
def private(self, value):
"""Setter for private property."""
self.logger.warn(
"Setting values on private will NOT update the remote Canvas instance."
)
self._private = value
@property
def starred(self):
"""whether the conversation is starred."""
return self._starred
@starred.setter
def starred(self, value):
"""Setter for starred property."""
self.logger.warn(
"Setting values on starred will NOT update the remote Canvas instance."
)
self._starred = value
@property
def properties(self):
"""Additional conversation flags (last_author, attachments, media_objects). Each listed property means the flag is set to true (i.e. the current user is the most recent author, there are attachments, or there are media objects)."""
return self._properties
@properties.setter
def properties(self, value):
"""Setter for properties property."""
self.logger.warn(
"Setting values on properties will NOT update the remote Canvas instance."
)
self._properties = value
@property
def audience(self):
"""Array of user ids who are involved in the conversation, ordered by participation level, then alphabetical. Excludes current user, unless this is a monologue."""
return self._audience
@audience.setter
def audience(self, value):
"""Setter for audience property."""
self.logger.warn(
"Setting values on audience will NOT update the remote Canvas instance."
)
self._audience = value
@property
def audience_contexts(self):
"""Most relevant shared contexts (courses and groups) between current user and other participants. If there is only one participant, it will also include that user's enrollment(s)/ membership type(s) in each course/group."""
return self._audience_contexts
@audience_contexts.setter
def audience_contexts(self, value):
"""Setter for audience_contexts property."""
self.logger.warn(
"Setting values on audience_contexts will NOT update the remote Canvas instance."
)
self._audience_contexts = value
@property
def avatar_url(self):
"""URL to appropriate icon for this conversation (custom, individual or group avatar, depending on audience)."""
return self._avatar_url
@avatar_url.setter
def avatar_url(self, value):
"""Setter for avatar_url property."""
self.logger.warn(
"Setting values on avatar_url will NOT update the remote Canvas instance."
)
self._avatar_url = value
@property
def participants(self):
"""Array of users participating in the conversation. Includes current user."""
return self._participants
@participants.setter
def participants(self, value):
"""Setter for participants property."""
self.logger.warn(
"Setting values on participants will NOT update the remote Canvas instance."
)
self._participants = value
@property
def visible(self):
"""indicates whether the conversation is visible under the current scope and filter. This attribute is always true in the index API response, and is primarily useful in create/update responses so that you can know if the record should be displayed in the UI. The default scope is assumed, unless a scope or filter is passed to the create/update API call."""
return self._visible
@visible.setter
def visible(self, value):
"""Setter for visible property."""
self.logger.warn(
"Setting values on visible will NOT update the remote Canvas instance."
)
self._visible = value
@property
def context_name(self):
"""Name of the course or group in which the conversation is occurring."""
return self._context_name
@context_name.setter
def context_name(self, value):
"""Setter for context_name property."""
self.logger.warn(
"Setting values on context_name will NOT update the remote Canvas instance."
)
self._context_name = value
class Conversationparticipant(BaseModel):
"""Conversationparticipant Model."""
def __init__(self, id=None, name=None, full_name=None, avatar_url=None):
"""Init method for Conversationparticipant class."""
self._id = id
self._name = name
self._full_name = full_name
self._avatar_url = avatar_url
self.logger = logging.getLogger("py3canvas.Conversationparticipant")
@property
def id(self):
"""The user ID for the participant."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def name(self):
"""A short name the user has selected, for use in conversations or other less formal places through the site."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn(
"Setting values on name will NOT update the remote Canvas instance."
)
self._name = value
@property
def full_name(self):
"""The full name of the user."""
return self._full_name
@full_name.setter
def full_name(self, value):
"""Setter for full_name property."""
self.logger.warn(
"Setting values on full_name will NOT update the remote Canvas instance."
)
self._full_name = value
@property
def avatar_url(self):
"""If requested, this field will be included and contain a url to retrieve the user's avatar."""
return self._avatar_url
@avatar_url.setter
def avatar_url(self, value):
"""Setter for avatar_url property."""
self.logger.warn(
"Setting values on avatar_url will NOT update the remote Canvas instance."
)
self._avatar_url = value
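# --- Usage sketch (illustrative) ---------------------------------------------
# A minimal example of driving the generated client above. It assumes the
# BaseCanvasAPI constructor accepts the Canvas API root URL and an access token
# (the usual py3canvas convention); the URL and token below are placeholders.
def _example_list_unread_conversations():
    api = ConversationsAPI("https://canvas.example.com/api/", "<access-token>")
    # Only unread, non-archived conversations for the current user.
    return api.list_conversations(scope="unread")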
|
#!/usr/bin/env python
"""
Project1 is Steve's example project for the blog.
It contains sample code here and there.
"""
|
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Schemas test."""
import json
import logging
import os
from marshmallow import ValidationError
from qiskit.qobj._schema_validation import (validate_json_against_schema,
_get_validator)
from qiskit.providers.models import (BackendConfiguration, BackendProperties,
BackendStatus, JobStatus)
from qiskit.result import Result
from qiskit.test import QiskitTestCase, Path
logger = logging.getLogger(__name__)
class TestSchemaExamples(QiskitTestCase):
"""
Tests schema validation
"""
_json_examples_per_schema = {
"backend_configuration": [
"backend_configuration_openpulse_example.json",
"backend_configuration_openqasm_example.json",
"backend_configuration_openqasm_simulator_example.json"],
"backend_properties": [
"backend_properties_example.json"],
"backend_status": [
"backend_status_example.json"],
"default_pulse_configuration": [
"default_pulse_configuration_example.json"],
"job_status": [
"job_status_example.json"],
"qobj": [
"qobj_openpulse_example.json",
"qobj_openqasm_example.json"],
"result": [
"result_openqasm_example.json",
"result_openpulse_level_0_example.json",
"result_openpulse_level_1_example.json",
"result_statevector_simulator_example.json",
"result_unitary_simulator_example.json"]
}
def setUp(self):
self.examples_base_path = self._get_resource_path('examples',
Path.SCHEMAS)
def test_examples_are_valid(self):
"""Validate example json files against respective schemas"""
schemas = TestSchemaExamples._json_examples_per_schema
for schema_name, examples in schemas.items():
with self.subTest(schema_test=schema_name):
for example_schema in examples:
with self.subTest(example=example_schema):
with open(os.path.join(self.examples_base_path,
example_schema),
'r') as example_file:
example = json.load(example_file)
                            msg = ('JSON failed validation of {}. '
                                   'Set Qiskit log level to DEBUG '
                                   'for further information.'.format(schema_name))
validate_json_against_schema(example,
schema_name, msg)
# TODO: temporary quick check for validating examples
# using the qiskit.validation-based Result.
try:
obj_map = {'result': Result,
'backend_configuration': BackendConfiguration,
'backend_properties': BackendProperties,
'backend_status': BackendStatus,
'job_status': JobStatus}
cls = obj_map.get(schema_name, None)
if cls and 'openpulse' not in example_schema:
_ = cls.from_dict(example)
except ValidationError as ex:
logger.debug(example_schema, ex)
def test_schemas_are_valid(self):
"""Validate that schemas are valid jsonschema"""
schemas = TestSchemaExamples._json_examples_per_schema
for schema_name in schemas:
with self.subTest(schema_test=schema_name):
_get_validator(schema_name, check_schema=True)
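# --- Usage sketch (illustrative) ---------------------------------------------
# A hedged example of validating a payload outside the test harness, reusing the
# validate_json_against_schema(example, schema_name, err_msg) call shape
# exercised above. The payload is a placeholder and only passes validation if it
# actually conforms to the backend_status schema.
def _example_validate_backend_status(payload):
    validate_json_against_schema(
        payload,
        "backend_status",
        "JSON failed validation of backend_status. "
        "Set Qiskit log level to DEBUG for further information.")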
|
#!/usr/bin/env python
'''@package docstring
Just a giant list of processes and properties
'''
processes = {
# inclusive NLO V+jets
'DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZJets_nlo','MC',6025.2),
'DYJetsToNuNu_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZtoNuNu_nlo','MC',11433.),
'WJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('WJets_nlo','MC',61527.),
# LO Z->nunu
'ZJetsToNuNu_HT-100To200_13TeV-madgraph':('ZtoNuNu_ht100to200','MC',280.5),
'ZJetsToNuNu_HT-200To400_13TeV-madgraph':('ZtoNuNu_ht200to400','MC',77.7),
'ZJetsToNuNu_HT-400To600_13TeV-madgraph':('ZtoNuNu_ht400to600','MC',10.71),
'ZJetsToNuNu_HT-600To800_13TeV-madgraph':('ZtoNuNu_ht600to800','MC',2.562),
'ZJetsToNuNu_HT-800To1200_13TeV-madgraph':('ZtoNuNu_ht800to1200','MC',1.183),
'ZJetsToNuNu_HT-1200To2500_13TeV-madgraph':('ZtoNuNu_ht1200to2500','MC',0.286),
'ZJetsToNuNu_HT-2500ToInf_13TeV-madgraph':('ZtoNuNu_ht2500toinf','MC',0.006945),
'ZJetsToNuNu_HT-600ToInf_13TeV-madgraph':('ZtoNuNu_ht600toinf','MC',4.098),
# LO Z->ll
'DYJetsToLL_M-50_HT-70to100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht50to100','MC',175.3),
'DYJetsToLL_M-50_HT-100to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht100to200','MC',148.),
'DYJetsToLL_M-50_HT-200to400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht200to400','MC',40.94),
'DYJetsToLL_M-50_HT-400to600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht400to600','MC',5.497),
'DYJetsToLL_M-50_HT-600toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht600toinf','MC',2.193),
'DYJetsToLL_M-50_HT-1200to2500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht1200to2500','MC',0.1514),
'DYJetsToLL_M-50_HT-2500toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht2500toinf','MC',0.003565),
'DYJetsToLL_M-50_HT-600to800_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht600to800','MC',1.367),
'DYJetsToLL_M-50_HT-800to1200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZJets_ht800to1200','MC',0.6304),
# LO W->lnu
'WJetsToLNu_HT-70To100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht70to100','MC',1319),
'WJetsToLNu_HT-100To200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht100to200','MC',1343),
'WJetsToLNu_HT-200To400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht200to400','MC',359.6),
'WJetsToLNu_HT-400To600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht400to600','MC',48.85),
'WJetsToLNu_HT-600To800_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht600to800','MC',12.05),
'WJetsToLNu_HT-800To1200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht800to1200','MC',5.501),
'WJetsToLNu_HT-1200To2500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht1200to2500','MC',1.329),
'WJetsToLNu_HT-2500ToInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht2500toinf','MC',0.03216),
'WJetsToLNu_HT-600ToInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('WJets_ht600toinf','MC',18.91),
# NLO W->lnu
'WJetsToLNu_Pt-100To250_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('WJets_pt100to250','MC',677.82),
'WJetsToLNu_Pt-250To400_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('WJets_pt250to400','MC',24.083),
'WJetsToLNu_Pt-400To600_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('WJets_pt400to600','MC',3.0563),
'WJetsToLNu_Pt-600ToInf_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('WJets_pt600toinf','MC',0.4602),
# NLO Z->ll
'DYJetsToLL_Pt-50To100_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZJets_pt50to100','MC',374.6800),
'DYJetsToLL_Pt-100To250_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZJets_pt100to250','MC',86.5200),
'DYJetsToLL_Pt-250To400_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZJets_pt250to400','MC',3.3247),
'DYJetsToLL_Pt-400To650_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZJets_pt400to650','MC',0.4491),
'DYJetsToLL_Pt-650ToInf_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZJets_pt650toinf','MC',0.0422),
# NLO Z->nunu
'DYJetsToNuNu_PtZ-100To250_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZtoNuNu_pt100to250','MC',3*54.8229),
'DYJetsToNuNu_PtZ-250To400_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZtoNuNu_pt250to400','MC',3*2.0705),
'DYJetsToNuNu_PtZ-400To650_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZtoNuNu_pt400to650','MC',3*0.2779),
'DYJetsToNuNu_PtZ-650ToInf_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8':('ZtoNuNu_pt650toinf','MC',3*0.0261),
# LO gamma
'GJets_HT-40To100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('GJets_ht40to100','MC',23080.0),
'GJets_HT-100To200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('GJets_ht100to200','MC',9235),
'GJets_HT-200To400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('GJets_ht200to400','MC',2298),
'GJets_HT-400To600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('GJets_ht400to600','MC',277.6),
'GJets_HT-600ToInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('GJets_ht600toinf','MC',93.47),
# QCD
'QCD_HT50to100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht50to100','MC',278700000),
'QCD_HT100to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht100to200','MC',27990000),
'QCD_HT200to300_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht200to300','MC',1735000),
'QCD_HT300to500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht300to500','MC',366800),
'QCD_HT500to700_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht500to700','MC',29370),
'QCD_HT700to1000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht700to1000','MC',6524),
'QCD_HT1000to1500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht1000to1500','MC',1064),
'QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht1500to2000','MC',121.5),
'QCD_HT2000toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('QCD_ht2000toinf','MC',25.42),
# Single tops
'ST_t-channel_antitop_4f_leptonDecays_13TeV-powheg-pythia8':('SingleTop_tTbar_lep','MC',26.22),
'ST_t-channel_top_4f_leptonDecays_13TeV-powheg-pythia8':('SingleTop_tT_lep','MC',44.07),
'ST_tW_top_5f_inclusiveDecays_13TeV-powheg-pythia8_TuneCUETP8M1':('SingleTop_tW','MC',35.85),
'ST_tW_antitop_5f_inclusiveDecays_13TeV-powheg-pythia8_TuneCUETP8M1':('SingleTop_tbarW','MC',35.85),
'ST_t-channel_antitop_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1':('SingleTop_tTbar','MC',80.95),
'ST_t-channel_top_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1':('SingleTop_tT','MC',136.02),
'ST_s-channel_4f_leptonDecays_13TeV-amcatnlo-pythia8_TuneCUETP8M1':('SingleTop_s_lep','MC',10.11),
'ST_t-channel_5f_leptonDecays_13TeV-amcatnlo-pythia8_TuneCUETP8M1':('SingleTop_t_lep','MC',216.99),
'ST_t-channel_top_4f_leptonDecays_13TeV-powheg-pythia8_TuneCUETP8M1':('SingleTop_tchannel','MC',44.3),
# ttbar
'TTJets_TuneCUETP8M1_13TeV-madgraphMLM-pythia8': ('TTbar_MLM','MC',831.76),
'TTJets_DiLept_TuneCUETP8M1_13TeV-madgraphMLM-pythia8': ('TTbar_2L','MC',831.76*(1-0.68)*(1-0.68)),
'TTJets_SingleLeptFromT_TuneCUETP8M1_13TeV-madgraphMLM-pythia8': ('TTbar_1LT','MC',831.76*0.68*(1-0.68)),
'TTJets_SingleLeptFromTbar_TuneCUETP8M1_13TeV-madgraphMLM-pythia8': ('TTbar_1LTbar','MC',831.76*0.68*(1-0.68)),
'TTJets_SingleLeptFromT_TuneCUETP8M2T4_13TeV-amcatnloFXFX-pythia8': ('TTbar_FXFX_1LT','MC',831.76*0.68*(1-0.68)),
'TTJets_SingleLeptFromTbar_TuneCUETP8M2T4_13TeV-amcatnloFXFX-pythia8': ('TTbar_FXFX_1LTbar','MC',831.76*0.68*(1-0.68)),
'TT_TuneEE5C_13TeV-powheg-herwigpp': ('TTbar_Herwig','MC',831.76),
'TTJets_TuneCUETP8M2T4_13TeV-amcatnloFXFX-pythia8': ('TTbar_FXFX','MC',831.76),
'TT_TuneCUETP8M2T4_13TeV-powheg-pythia8': ('TTbar_Powheg','MC',831.76),
'TT_TuneCUETP8M2T4_13TeV-powheg-isrdown-pythia8': ('TTbar_PowhegISRDown','MC',831.76),
'TT_TuneCUETP8M2T4_13TeV-powheg-isrup-pythia8': ('TTbar_PowhegISRUp','MC',831.76),
'TT_TuneCUETP8M2T4down_13TeV-powheg-pythia8': ('TTbar_PowhegTuneDown','MC',831.76),
'TT_TuneCUETP8M2T4up_13TeV-powheg-pythia8': ('TTbar_PowhegTuneUp','MC',831.76),
'TTTo2L2Nu_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8':('TTTo2L2Nu','MC',88.288),
# exotic top
'tZq_ll_4f_13TeV-amcatnlo-pythia8':('SingleTop_tZll','MC',0.0758),
'tZq_nunu_4f_13TeV-amcatnlo-pythia8_TuneCUETP8M1':('SingleTop_tZnunu','MC',0.1379),
'TTGJets_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8':('TTbar_GJets','MC',3.786),
'TGJets_TuneCUETP8M1_13TeV_amcatnlo_madspin_pythia8':('SingleTop_tG','MC',2.967),
# EWK V+jets
'EWKZ2Jets_ZToNuNu_13TeV-madgraph-pythia8':('ZtoNuNu_EWK','MC',10.04),
'EWKZ2Jets_ZToLL_M-50_13TeV-madgraph-pythia8':('ZJets_EWK','MC',3.99),
'EWKWPlus2Jets_WToLNu_M-50_13TeV-madgraph-pythia8':('WJets_EWKWPlus','MC',25.81),
'EWKWMinus2Jets_WToLNu_M-50_13TeV-madgraph-pythia8':('WJets_EWKWMinus','MC',20.35),
# regular dibosons
'WW_TuneCUETP8M1_13TeV-pythia8':('Diboson_ww','MC',118.7),
'WZ_TuneCUETP8M1_13TeV-pythia8':('Diboson_wz','MC',47.13),
'ZZ_TuneCUETP8M1_13TeV-pythia8':('Diboson_zz','MC',16.523),
# fancy dibosons
'WWTo2L2Nu_13TeV-powheg':('WWTo2L2Nu','MC',(118.7-3.974)*0.1086*0.1086*9), #12.178
'WWTo4Q_13TeV-powheg':('WWTo4Q','MC',51.723),
'WWToLNuQQ_13TeV-powheg':('WWToLNuQQ','MC',49.997),
'WZTo1L1Nu2Q_13TeV_amcatnloFXFX_madspin_pythia8':('WZTo1L1Nu2Q','MC',10.71),
'WZTo1L3Nu_13TeV_amcatnloFXFX_madspin_pythia8':('WZTo1L3Nu','MC',3.033),
'WZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8':('WZTo2L2Q','MC',5.595),
'WZTo3LNu_TuneCUETP8M1_13TeV-powheg-pythia8':('WZTo3LNu','MC',4.430),
'ZZTo2L2Nu_13TeV_powheg_pythia8':('ZZTo2L2Nu','MC',0.5644),
'ZZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8':('ZZTo2L2Q','MC',3.22),
'ZZTo4L_13TeV_powheg_pythia8':('ZZTo4L','MC',1.212),
'GluGluToContinToZZTo2e2nu_13TeV_MCFM701_pythia8':('ggZZTo2e2nu','MC',0.003956),
    'GluGluToContinToZZTo2mu2nu_13TeV_MCFM701_pythia8':('ggZZTo2mu2nu','MC',0.003956),
'GluGluToContinToZZTo2e2mu_13TeV_MCFM701_pythia8':('ggZZTo2e2mu','MC',0.0073462),
'GluGluToContinToZZTo2e2tau_13TeV_MCFM701_pythia8':('ggZZTo2e2tau','MC',0.0073462),
'GluGluToContinToZZTo2mu2tau_13TeV_MCFM701_pythia8':('ggZZTo2mu2tau','MC',0.0073462),
'GluGluToContinToZZTo4e_13TeV_MCFM701_pythia8':('ggZZTo4e','MC',0.0036478),
'GluGluToContinToZZTo4mu_13TeV_MCFM701_pythia8':('ggZZTo4mu','MC',0.0036478),
'GluGluToContinToZZTo4tau_13TeV_MCFM701_pythia8':('ggZZTo4tau','MC',0.0036478),
# Higgs->bb
#'ZH_HToBB_ZToNuNu_M125_13TeV_powheg_pythia8':('ZnunuH','MC',0.08912),
'ZH_HToBB_ZToNuNu_M125_13TeV_powheg_pythia8':('ZnunuH','MC',0.884*0.582*0.20),
'ZH_HToBB_ZToLL_M125_13TeV_powheg_pythia8':('ZllH','MC',0.04865),
'ZH_HToBB_ZToQQ_M125_13TeV_powheg_pythia8':('ZqqH','MC',0.607*0.582),
'ggZH_HToBB_ZToNuNu_M125_13TeV_powheg_pythia8':('ggZnunuH','MC',0.014366),
'ggZH_HToBB_ZToLL_M125_13TeV_powheg_pythia8':('ggZllH','MC',0.007842),
'WminusH_HToBB_WToLNu_M125_13TeV_powheg_pythia8':('WmH','MC',0.100),
'WplusH_HToBB_WToLNu_M125_13TeV_powheg_pythia8':('WpH','MC',0.159),
'WplusH_HToBB_WToQQ_M125_13TeV_powheg_pythia8':('WpH_qq','MC',0.84*(1-3*0.108)*0.582),
'WminusH_HToBB_WToQQ_M125_13TeV_powheg_pythia8':('WmH_qq','MC',0.533*(1-3*0.108)*0.582),
'WWTo4Q_4f_13TeV_amcatnloFXFX_madspin_pythia8':('WW_4q','MC',51.723),
'WJetsToQQ_HT180_13TeV-madgraphMLM-pythia8':('WJets_qq','MC',2788),
'ZZTo4Q_13TeV_amcatnloFXFX_madspin_pythia8':('ZZ_4q','MC',6.842),
'DYJetsToQQ_HT180_13TeV-madgraphMLM-pythia8':('ZJets_qq','MC',1187),
'ttHTobb_M125_13TeV_powheg_pythia8':('ttH','MC',0.506*0.5824),
'ttHTobb_M125_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8':('ttHtranche3','MC',0.506*0.5824),
'GluGluHToBB_M125_13TeV_powheg_pythia8':('ggH','MC',48.48*0.5824),
'VBFHToBB_M125_13TeV_amcatnlo_pythia8':('VBFH','MC',3.782*0.5824),
'VBFHToBB_M-125_13TeV_powheg_pythia8_weightfix':('VBFH_powheg','MC',3.782*0.5824),
}
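# --- Usage sketch (illustrative) ---------------------------------------------
# A small helper showing how the table above is typically consumed: map a full
# dataset name to its short nickname and cross section (pb), and derive a
# per-event weight for a target integrated luminosity (in /pb). The default
# luminosity is only an example value.
def example_sample_weight(dataset_name, n_generated, lumi_invpb=35900.):
    nickname, sample_type, xsec = processes[dataset_name]
    if sample_type != 'MC':
        return nickname, 1.0          # data events are not reweighted
    return nickname, xsec * lumi_invpb / float(n_generated)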
|
"""Livestreamer extracts streams from various services.
The main component of Livestreamer is a CLI program that launches
the streams in a video player.
An API is also provided that allows direct access to stream data.
Full documentation is available at http://livestreamer.tanuki.se/.
"""
__title__ = "livestreamer"
__version__ = "1.7.2"
__license__ = "Simplified BSD"
__author__ = "Christopher Rosell"
__copyright__ = "Copyright 2011-2014 Christopher Rosell"
__credits__ = ["Christopher Rosell", "Athanasios Oikonomou",
"Gaspard Jankowiak", "Dominik Dabrowski",
"Toad King", "Niall McAndrew", "Daniel Wallace",
"Sam Edwards", "John Peterson", "Kacper"]
from .exceptions import (LivestreamerError, PluginError, NoStreamsError,
NoPluginError, StreamError)
from .session import Livestreamer
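# --- Usage sketch (illustrative) ---------------------------------------------
# A minimal example of the documented API: create a session, resolve a URL to
# its available streams, and open one. The URL is a placeholder, and stream
# names such as "best" depend on what the matching plugin returns.
def _example_open_best_stream(url="http://example.com/some/stream"):
    session = Livestreamer()
    streams = session.streams(url)    # may raise NoPluginError or PluginError
    if "best" not in streams:
        raise NoStreamsError(url)
    return streams["best"].open()     # file-like object with the stream data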
|
#
# Copyright (c) 2021 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from typing import Any
import typing
from chip import ChipDeviceCtrl
from chip import ChipCommissionableNodeCtrl
import chip.interaction_model as IM
import threading
import os
import sys
import logging
import time
import ctypes
logger = logging.getLogger('PythonMatterControllerTEST')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s %(message)s'))
sh.setStream(sys.stdout)
logger.addHandler(sh)
def TestFail(message):
logger.fatal("Testfail: {}".format(message))
os._exit(1)
def FailIfNot(cond, message):
if not cond:
TestFail(message)
class TestTimeout(threading.Thread):
def __init__(self, timeout: int):
threading.Thread.__init__(self)
self._timeout = timeout
self._should_stop = False
self._cv = threading.Condition()
def stop(self):
with self._cv:
self._should_stop = True
self._cv.notify_all()
self.join()
def run(self):
stop_time = time.time() + self._timeout
logger.info("Test timeout set to {} seconds".format(self._timeout))
with self._cv:
wait_time = stop_time - time.time()
while wait_time > 0 and not self._should_stop:
self._cv.wait(wait_time)
wait_time = stop_time - time.time()
if time.time() > stop_time:
TestFail("Timeout")
class TestResult:
def __init__(self, operationName, result):
self.operationName = operationName
self.result = result
def assertStatusEqual(self, expected):
if self.result is None:
raise Exception(f"{self.operationName}: no result got")
if self.result.status != expected:
raise Exception(
f"{self.operationName}: expected status {expected}, got {self.result.status}")
return self
def assertValueEqual(self, expected):
self.assertStatusEqual(0)
if self.result is None:
raise Exception(f"{self.operationName}: no result got")
if self.result.value != expected:
raise Exception(
f"{self.operationName}: expected value {expected}, got {self.result.value}")
return self
class BaseTestHelper:
def __init__(self, nodeid: int):
self.devCtrl = ChipDeviceCtrl.ChipDeviceController(
controllerNodeId=nodeid)
self.logger = logger
self.commissionableNodeCtrl = ChipCommissionableNodeCtrl.ChipCommissionableNodeController()
def _WaitForOneDiscoveredDevice(self, timeoutSeconds: int = 2):
print("Waiting for device responses...")
strlen = 100
addrStrStorage = ctypes.create_string_buffer(strlen)
timeout = time.time() + timeoutSeconds
while (not self.devCtrl.GetIPForDiscoveredDevice(0, addrStrStorage, strlen) and time.time() <= timeout):
time.sleep(0.2)
if time.time() > timeout:
return None
return ctypes.string_at(addrStrStorage)
def TestDiscovery(self, discriminator: int):
self.logger.info(
f"Discovering commissionable nodes with discriminator {discriminator}")
self.devCtrl.DiscoverCommissionableNodesLongDiscriminator(
ctypes.c_uint16(int(discriminator)))
res = self._WaitForOneDiscoveredDevice()
if not res:
self.logger.info(
f"Device not found")
return False
self.logger.info(f"Found device at {res}")
return res
def TestKeyExchange(self, ip: str, setuppin: int, nodeid: int):
self.logger.info("Conducting key exchange with device {}".format(ip))
if not self.devCtrl.ConnectIP(ip.encode("utf-8"), setuppin, nodeid):
self.logger.info(
"Failed to finish key exchange with device {}".format(ip))
return False
self.logger.info("Device finished key exchange.")
return True
def TestCloseSession(self, nodeid: int):
self.logger.info(f"Closing sessions with device {nodeid}")
try:
self.devCtrl.CloseSession(nodeid)
return True
except Exception as ex:
self.logger.exception(
f"Failed to close sessions with device {nodeid}: {ex}")
return False
def TestNetworkCommissioning(self, nodeid: int, endpoint: int, group: int, dataset: str, network_id: str):
self.logger.info("Commissioning network to device {}".format(nodeid))
try:
self.devCtrl.ZCLSend("NetworkCommissioning", "AddThreadNetwork", nodeid, endpoint, group, {
"operationalDataset": bytes.fromhex(dataset),
"breadcrumb": 0,
"timeoutMs": 1000}, blocking=True)
except Exception as ex:
self.logger.exception("Failed to send AddThreadNetwork command")
return False
self.logger.info(
"Send EnableNetwork command to device {}".format(nodeid))
try:
self.devCtrl.ZCLSend("NetworkCommissioning", "EnableNetwork", nodeid, endpoint, group, {
"networkID": bytes.fromhex(network_id),
"breadcrumb": 0,
"timeoutMs": 1000}, blocking=True)
except Exception as ex:
self.logger.exception("Failed to send EnableNetwork command")
return False
return True
def TestOnOffCluster(self, nodeid: int, endpoint: int, group: int):
self.logger.info(
"Sending On/Off commands to device {} endpoint {}".format(nodeid, endpoint))
err, resp = self.devCtrl.ZCLSend("OnOff", "On", nodeid,
endpoint, group, {}, blocking=True)
if err != 0 or resp is None or resp.Status != 0:
self.logger.error(
"failed to send OnOff.On: error is {} with im response{}".format(err, resp))
return False
err, resp = self.devCtrl.ZCLSend("OnOff", "Off", nodeid,
endpoint, group, {}, blocking=True)
if err != 0 or resp is None or resp.Status != 0:
self.logger.error(
"failed to send OnOff.Off: error is {} with im response {}".format(err, resp))
return False
return True
def TestLevelControlCluster(self, nodeid: int, endpoint: int, group: int):
self.logger.info(
f"Sending MoveToLevel command to device {nodeid} endpoint {endpoint}")
try:
commonArgs = dict(transitionTime=0, optionMask=0, optionOverride=0)
# Move to 0
self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
endpoint, group, dict(**commonArgs, level=0), blocking=True)
res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
attribute="CurrentLevel",
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult("Read attribute LevelControl.CurrentLevel",
res).assertValueEqual(0)
# Move to 255
self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
endpoint, group, dict(**commonArgs, level=255), blocking=True)
res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
attribute="CurrentLevel",
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult("Read attribute LevelControl.CurrentLevel",
res).assertValueEqual(255)
return True
except Exception as ex:
self.logger.exception(f"Level cluster test failed: {ex}")
return False
def TestResolve(self, nodeid):
self.logger.info(
"Resolve: node id = {:08x}".format(nodeid))
try:
self.devCtrl.ResolveNode(nodeid=nodeid)
addr = self.devCtrl.GetAddressAndPort(nodeid)
if not addr:
return False
self.logger.info(f"Resolved address: {addr[0]}:{addr[1]}")
return True
except Exception as ex:
self.logger.exception("Failed to resolve. {}".format(ex))
return False
def TestReadBasicAttributes(self, nodeid: int, endpoint: int, group: int):
basic_cluster_attrs = {
"VendorName": "TEST_VENDOR",
"VendorID": 9050,
"ProductName": "TEST_PRODUCT",
"ProductID": 65279,
"UserLabel": "",
"Location": "",
"HardwareVersion": 0,
"HardwareVersionString": "TEST_VERSION",
"SoftwareVersion": 0,
"SoftwareVersionString": "prerelease",
}
failed_zcl = {}
for basic_attr, expected_value in basic_cluster_attrs.items():
try:
res = self.devCtrl.ZCLReadAttribute(cluster="Basic",
attribute=basic_attr,
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult(f"Read attribute {basic_attr}", res).assertValueEqual(
expected_value)
except Exception as ex:
failed_zcl[basic_attr] = str(ex)
if failed_zcl:
self.logger.exception(f"Following attributes failed: {failed_zcl}")
return False
return True
def TestWriteBasicAttributes(self, nodeid: int, endpoint: int, group: int):
@dataclass
class AttributeWriteRequest:
cluster: str
attribute: str
value: Any
expected_status: IM.Status = IM.Status.Success
requests = [
AttributeWriteRequest("Basic", "UserLabel", "Test"),
AttributeWriteRequest("Basic", "Location",
"a pretty loooooooooooooog string", IM.Status.InvalidValue),
]
failed_zcl = []
for req in requests:
try:
res = self.devCtrl.ZCLWriteAttribute(cluster=req.cluster,
attribute=req.attribute,
nodeid=nodeid,
endpoint=endpoint,
groupid=group,
value=req.value)
TestResult(f"Write attribute {req.cluster}.{req.attribute}", res).assertStatusEqual(
req.expected_status)
if req.expected_status != IM.Status.Success:
                    # The write was expected to fail, so skip the read-back verification.
continue
res = self.devCtrl.ZCLReadAttribute(
cluster=req.cluster, attribute=req.attribute, nodeid=nodeid, endpoint=endpoint, groupid=group)
TestResult(f"Read attribute {req.cluster}.{req.attribute}", res).assertValueEqual(
req.value)
except Exception as ex:
failed_zcl.append(str(ex))
if failed_zcl:
self.logger.exception(f"Following attributes failed: {failed_zcl}")
return False
return True
def TestSubscription(self, nodeid: int, endpoint: int):
class _subscriptionHandler(IM.OnSubscriptionReport):
def __init__(self, path: IM.AttributePath, logger: logging.Logger):
super(_subscriptionHandler, self).__init__()
self.subscriptionReceived = 0
self.path = path
self.countLock = threading.Lock()
self.cv = threading.Condition(self.countLock)
self.logger = logger
def OnData(self, path: IM.AttributePath, subscriptionId: int, data: typing.Any) -> None:
if path != self.path:
return
logger.info(
f"Received report from server: path: {path}, value: {data}, subscriptionId: {subscriptionId}")
with self.countLock:
self.subscriptionReceived += 1
self.cv.notify_all()
class _conductAttributeChange(threading.Thread):
def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceController, nodeid: int, endpoint: int):
super(_conductAttributeChange, self).__init__()
self.nodeid = nodeid
self.endpoint = endpoint
self.devCtrl = devCtrl
def run(self):
for i in range(5):
time.sleep(3)
self.devCtrl.ZCLSend(
"OnOff", "Toggle", self.nodeid, self.endpoint, 0, {})
try:
subscribedPath = IM.AttributePath(
nodeId=nodeid, endpointId=endpoint, clusterId=6, attributeId=0)
# OnOff Cluster, OnOff Attribute
handler = _subscriptionHandler(subscribedPath, self.logger)
IM.SetAttributeReportCallback(subscribedPath, handler)
self.devCtrl.ZCLSubscribeAttribute(
"OnOff", "OnOff", nodeid, endpoint, 1, 10)
changeThread = _conductAttributeChange(
self.devCtrl, nodeid, endpoint)
# Reset the number of subscriptions received as subscribing causes a callback.
handler.subscriptionReceived = 0
changeThread.start()
with handler.cv:
while handler.subscriptionReceived < 5:
                    # We should observe 5 attribute changes, one per toggle.
handler.cv.wait()
changeThread.join()
return True
except Exception as ex:
self.logger.exception(f"Failed to finish API test: {ex}")
return False
def TestNonControllerAPIs(self):
'''
This function validates various APIs provided by chip package which is not related to controller.
TODO: Add more tests for APIs
'''
try:
cluster = self.devCtrl.GetClusterHandler()
clusterInfo = cluster.GetClusterInfoById(0x50F) # TestCluster
if clusterInfo["clusterName"] != "TestCluster":
raise Exception(
f"Wrong cluster info clusterName: {clusterInfo['clusterName']} expected TestCluster")
except Exception as ex:
self.logger.exception(f"Failed to finish API test: {ex}")
return False
return True
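# --- Usage sketch (illustrative) ---------------------------------------------
# A hedged outline of how the helper above is typically driven from a test
# script. The IP address, setup PIN, node id and discriminator are placeholders
# for the device under test.
def _example_basic_flow():
    timeout = TestTimeout(75)
    timeout.start()
    test = BaseTestHelper(nodeid=112233)
    FailIfNot(test.TestDiscovery(discriminator=3840), "Discovery failed")
    FailIfNot(test.TestKeyExchange(ip="192.168.1.50", setuppin=20202021, nodeid=1),
              "Key exchange failed")
    FailIfNot(test.TestOnOffCluster(nodeid=1, endpoint=1, group=0),
              "OnOff cluster test failed")
    timeout.stop()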
|
from src.config import config
from src.model import param_universe_dao
from src.model.EventEnumerator import EventEnumerator
from src.model.LogUniverseGenerator import *
class PartitionedUniverseIndexer(object):
def __init__(self, scoring_param_info_id):
self.scoring_param_info_id = scoring_param_info_id
self.es = config.es
self.scoring_param_info = param_universe_dao.load_universe_info(self.scoring_param_info_id)
self.op_possible_index = 'flat-op-' + self.scoring_param_info_id.lower()
self.user_possible_index = 'flat-user-' + self.scoring_param_info_id.lower()
self.resource_possible_index = 'flat-resource-' + self.scoring_param_info_id.lower()
self.use_resources = False
def index(self):
# Index user, op, resource combinations
if self.use_resources:
if not self.es.indices.exists(index=self.resource_possible_index):
resource_param_info = self.scoring_param_info.copy()
resource_keys = [s for s in self.scoring_param_info['valid_keys_sets']['valid_keys'] if s.startswith('requestParameters')]
resource_keys.append('eventName')
resource_keys.append('eventSource')
param_universe_dao.prune_param_info(resource_param_info, resource_keys)
resource_event_enumerator = EventEnumerator(resource_param_info)
resource_es_writer = FlatEsUniverseWriter(resource_event_enumerator)
resource_es_writer.index_separated_universes(self.resource_possible_index)
if not self.es.indices.exists(index=self.user_possible_index):
user_param_info = self.scoring_param_info.copy()
param_universe_dao.prune_param_info(user_param_info, self.scoring_param_info['valid_keys_sets']['valid_keys_user'])
if user_param_info['rtopo_sorted_keys']:
user_event_enumerator = EventEnumerator(user_param_info)
user_es_writer = FlatEsUniverseWriter(user_event_enumerator)
user_es_writer.index_separated_universes(self.user_possible_index)
else:
user_es_writer = FlatEsUniverseWriter(None)
user_es_writer.create_index_only(self.user_possible_index)
if not self.es.indices.exists(index=self.op_possible_index):
op_param_info = self.scoring_param_info.copy()
op_keys = self.scoring_param_info['valid_keys_sets']['valid_keys_op']
if self.use_resources:
keys_to_remove = set()
for key in op_keys:
if key.startswith('requestParameters'):
keys_to_remove.add(key)
for k in keys_to_remove:
op_keys.discard(k)
param_universe_dao.prune_param_info(op_param_info, op_keys)
op_event_enumerator = EventEnumerator(op_param_info)
op_es_writer = FlatEsUniverseWriter(op_event_enumerator)
op_es_writer.index_separated_universes(self.op_possible_index)
def count_possible_privs(self):
#store to total_possible_priv_states
total_priv_size = 0
event_names = self.scoring_param_info['possible_params']['eventName']
user_count = self.es.count(index=self.user_possible_index, doc_type='doc', body={"query": {"match_all": {}}})['count']
if not self.use_resources:
    op_count = self.es.count(index=self.op_possible_index, doc_type='doc', body={"query": {"match_all": {}}})['count']
    return user_count * op_count
else:
    for event_name in event_names:
        op_count = self.es.count(index=self.op_possible_index, doc_type='doc', body={"query": {"match": {"eventName": event_name}}})['count']
        resource_count = self.es.count(index=self.resource_possible_index, doc_type='doc', body={"query": {"match": {"eventName": event_name}}})['count']
        total_priv_size += user_count * op_count * resource_count
# queries = []
# req_head = {'index': self.resource_possible_index, 'type': 'doc'}
# if user_constraints:
# user_query = RuleUtils.create_terms_filter_from_constraints(user_constraints)
# else:
# user_query = {"query": {"match_all": {}}, "size": 0}
# queries.extend([req_head, user_query])
return total_priv_size
if __name__ == "__main__":
# indexer = PartitionedUniverseIndexer('fields_large_ps_amftrue_pbftrue')
# indexer = PartitionedUniverseIndexer('fields_small_ps_amftrue_pbftrue')
indexer = PartitionedUniverseIndexer('fields_small_ps_amftrue_pbffalse')
indexer.index()
print(indexer.count_possible_privs())
|
import requests
import os
from pathlib import Path
URL = ''
p = Path('~/fotoparadies_api_endpoint.txt').expanduser()
if p.exists():
with p.open() as f:
URL = f.readline().strip()
if 'FOTOPARADIES_API_URL' in os.environ:
URL = os.environ['FOTOPARADIES_API_URL']
def get_order_info(shop, order):
p = {'config':1320, 'shop': shop, 'order': order}
if URL:
r = requests.get(URL, params=p)
else:
raise Exception('No API endpoint', URL)
return r.json()
def get_order_status(shop, order):
return get_order_info(shop, order)['summaryStateCode']
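# Usage sketch (the shop and order numbers below are made up):
#
#     status = get_order_status(1234, 567890)
#     print(status)  # prints the summaryStateCode reported by the API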
|
"""
requested_toppings = []
if requested_toppings:
for requested_topping in requested_toppings:
print("Adding " + requested_topping + ".")
print("\nFinished making your pizza!")
else:
print("Are you sure you want a plain pizza?")
"""
available_toppings = ['mushrooms', 'olives', 'green peppers',
'pepperonis', 'pineapple', 'extra cheese']
requested_toppings = ['mushrooms', 'french fries', 'extra cheese']
for requested_topping in requested_toppings:
if requested_topping in available_toppings:
print("Adding " + requested_topping + ".")
else:
print("Sorry we don't have " + requested_topping + ".")
print("\nFinished making your pizza.")
|
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
|
#!/usr/bin/env python3
# coding: utf-8
import gettext
import hashlib
import os
import re
import shutil
import sys
from functools import lru_cache
from pathlib import Path
from urllib.parse import unquote
import click
from loguru import logger
import requests
from .config import HOME, GetConfig, SetConfig
_ = gettext.gettext
proxy = GetConfig('proxy')
proxies = {'http': proxy, 'https': proxy} if proxy else {}
logger.remove()
level = 'DEBUG' if GetConfig('debug') == 'yes' else 'INFO'
logger.add(sys.stderr, colorize=True,
format='<level>{level: <8}</level> | <cyan>{function}</cyan> - <level>{message}</level>', level=level)
ua = GetConfig('UA')
DefaultUA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101'
UA = DefaultUA if not ua else ua
timeout = GetConfig('timeout')
timeout = float(timeout) if timeout else 5
def Hash(filepath, algo='sha256'):
if algo in hashlib.algorithms_available:
h = hashlib.new(algo)
blocksize = 2**20
with open(filepath, 'rb') as f:
data = f.read(blocksize)
while data:
h.update(data)
data = f.read(blocksize)
return h.hexdigest()
def Redirect(url: str) -> str:
rules = GetConfig('redirect')
if rules:
for rule in rules:
for pattern, to in rule.items():
m = re.match(pattern, url)
if m:
return to.format(*m.groups())
return url
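# Example of how a redirect rule is applied (the rule below is hypothetical,
# not a shipped default): with the `redirect` config set to
#   [{'https://example.com/(.+)': 'https://mirror.example.org/{0}'}]
# Redirect('https://example.com/a.zip') returns 'https://mirror.example.org/a.zip'.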
@lru_cache()
def GetPage(url: str, warn=True, UA=UA, timeout=timeout, redirect=True, tojson=False) -> str:
if redirect:
url = Redirect(url)
logger.debug(f'requesting {url}')
res = requests.get(
url, headers={'User-Agent': UA}, timeout=timeout, proxies=proxies)
if warn and res.status_code != 200:
logger.warning(f'{url} {res.status_code} error')
return 'error'
result = res.json() if tojson else res.text
return result
def Download(url: str, directory='', filename='', output=True, UA=UA, sha256='', redirect=True, timeout=timeout):
UA = 'Wget/1.20.3 (mingw32)' if UA == DefaultUA else UA
if not url.startswith('http'):
return Path(url)
if redirect:
url = Redirect(url)
if not directory:
directory = GetConfig('download_dir')
directory = Path(directory)
if not directory.exists():
directory.mkdir(parents=True)
if not filename:
filename = url.split('/')[-1]
file = directory / filename
cached = file.parent / (file.name+'.cached')
if GetConfig('download_cache') == 'yes' and cached.exists():
return file
if output:
print(_('downloading {url}').format(url=url))
print(_('saving to {path}').format(path=file))
downloader = GetConfig('downloader')
if downloader:
filepath, directory, filename = f'"{file}"', f'"{directory}"', f'"{filename}"'
if '{filepath}' in downloader:
command = downloader.format(url=url, filepath=filepath)
else:
command = downloader.format(
url=url, directory=directory, filename=filename)
os.system(command)
else:
req = requests.get(url, stream=True, proxies=proxies,
headers={'User-Agent': UA}, timeout=timeout)
if req.status_code != 200:
logger.warning(f'{req.status_code} error')
print(' try to download it with downloader')
print(' if you have installed wget')
print(r' try: mpkg set downloader "wget -q -O {filepath} {url}"')
chunk_size = 2**20
contents = req.iter_content(chunk_size=chunk_size)
if 'content-length' in req.headers:
length = int(req.headers['content-length'])/chunk_size
else:
logger.debug('unknown content-length')
length = 0
if length >= 1:
if length > 1024:
label = str(round(length/1024, 1))+'GB'
else:
label = str(round(length, 1))+'MB'
else:
label = ''
with click.progressbar(contents, length=length, label=label) as bar:
with open(str(file), 'wb') as f:
for chunk in bar:
if chunk:
f.write(chunk)
if not file.is_file():
logger.warning(f'failed to download {file}')
if sha256:
sha256 = sha256.lower()
algo, sha256 = sha256.split(
':') if ':' in sha256 else ('sha256', sha256)
print(_('checking {hash}').format(hash=algo))
if sha256 != Hash(file, algo):
logger.warning(f'wrong {algo}')
return file
def Selected(L: list, isSoft=False, msg=_('select (eg: 0,2-5):')) -> list:
cfg = []
for i, x in enumerate(L):
if isSoft:
print(f'{i} -> {x.name}')
else:
print(f'{i} -> {x}')
option = input(f' {msg} ').replace(' ', '').split(',')
print()
for i in option:
if '-' in i:
a, b = i.split('-')
for j in range(int(a), int(b)+1):
cfg.append(L[j])
else:
cfg.append(L[int(i)])
return cfg
def Name(softs):
names, ids = [], []
multiple, named = [], []
for soft in softs:
cfg = soft.get('cfg')
if cfg:
multiple.append(soft)
name = soft.get('name')
if name:
names.append(name)
named.append(soft)
ids.append(soft['id'])
for soft in named:
if soft['name'] in ids or names.count(soft['name']) > 1:
soft['name'] = soft['name']+'-'+soft['id']
for soft in multiple:
if not soft.get('name'):
soft['name'] = soft['id']+'.'+soft['name'].split('.')[-1]
names = []
for soft in softs:
if not soft.get('name'):
soft['name'] = soft['id']
soft['name'] = soft['name'].lower()
names.append(soft['name'])
if len(names) != len(set(names)):
logger.warning(
f'name conflict\n{[n for n in names if names.count(n)!=1]}')
def PreInstall():
SetConfig('download_dir', str(HOME / 'Downloads'), replace=False)
SetConfig('bin_dir', str(HOME / 'bin'), replace=False)
SetConfig('files_dir', str(HOME / 'files'), replace=False)
SetConfig(
'7z', r'"C:\Program Files\7-Zip\7z.exe" x {filepath} -o{root} -aoa > nul', replace=False)
for folder in ['py', 'json', 'zip', 'bin', 'files']:
directory = HOME / folder
if not directory.exists():
directory.mkdir(parents=True)
def DownloadApps(apps):
for app in apps:
app.download_prepare()
for app in apps:
app.download()
def ReplaceDir(root_src_dir, root_dst_dir):
# https://stackoverflow.com/q/7420617
for src_dir, _, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.move(src_file, dst_dir)
if Path(root_src_dir).exists():
shutil.rmtree(root_src_dir)
def Extract(filepath, root='', ver=''):
filepath = Path(filepath)
if not root:
root = filepath.parent.absolute() / '.'.join(
filepath.name.split('.')[:-1])
ver = '_' + ver if ver else ''
root = Path(str(root)+ver)
extract_dir = root.parent/'mpkg-temp-dir'
cmd = GetConfig('7z').format(filepath=str(filepath), root=extract_dir)
print(_('extracting {filepath} to {root}').format(
filepath=filepath, root=root))
os.system(cmd)
files, root_new = os.listdir(extract_dir), extract_dir
while len(files) == 1:
root_new = root_new/files[0]
if root_new.is_dir():
files = os.listdir(root_new)
else:
root_new = root_new.parent
break
ReplaceDir(str(root_new.absolute()), str(root.absolute()))
if extract_dir.exists():
shutil.rmtree(extract_dir)
return root
def Search(url='', regex='', links='{ver}', ver='', sort=False, reverse=False, UA=UA, sumurl='', findall=False, redirect=True):
if sumurl:
return SearchSum(url, sumurl, UA, redirect=redirect)
if not ver:
page = GetPage(url, UA=UA, redirect=redirect)
i = -1 if reverse else 0
result = re.findall(regex, page)
if sort:
result = sorted(result)
if findall:
return result
ver = result[i]
if isinstance(links, dict):
return dict([(k, v.format(ver=ver)) for k, v in links.items()])
elif isinstance(links, list):
return [item.format(ver=ver) for item in links]
else:
return links.format(ver=ver)
def SearchSum(links, sumurl, UA=UA, redirect=True):
page = GetPage(sumurl, UA=UA, redirect=redirect)
def search(url):
name = unquote(url.split('/')[-1])
return re.search(r'(\w+)\s+\*?' + re.escape(name), page).groups()[0]
if isinstance(links, dict):
return dict([(k, search(v)) for k, v in links.items()])
elif isinstance(links, list):
return [search(item) for item in links]
else:
return search(links)
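# --- Usage sketch (illustrative only; the URL and version below are made up,
# and this module is normally driven by the rest of mpkg rather than run
# directly) ---
if __name__ == '__main__':
    url = Redirect('https://example.com/tool-1.2.3.zip')
    archive = Download(url)                   # returns a pathlib.Path to the saved file
    print(Hash(archive))                      # sha256 digest of the download
    print(Extract(archive, ver='1.2.3'))      # unpacked directory next to the archive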
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm#l3xpc86sb2j6k9m&p($fwat7r^_d_pkwr2rk3i99%8vusqt-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'user.apps.UserConfig',
'core.apps.CoreConfig',
'recipe.apps.RecipeConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
|
from collections import defaultdict
from scrapy import log
from scrapy.exceptions import NotConfigured
from scrapy.utils.misc import load_object
from scrapy.utils.defer import process_parallel, process_chain, process_chain_both
class MiddlewareManager(object):
"""Base class for implementing middleware managers"""
component_name = 'foo middleware'
def __init__(self, *middlewares):
self.middlewares = middlewares
self.methods = defaultdict(list)
for mw in middlewares:
self._add_middleware(mw)
@classmethod
def _get_mwlist_from_settings(cls, settings):
raise NotImplementedError
@classmethod
def from_settings(cls, settings, crawler=None):
mwlist = cls._get_mwlist_from_settings(settings)
middlewares = []
for clspath in mwlist:
try:
mwcls = load_object(clspath)
if crawler and hasattr(mwcls, 'from_crawler'):
mw = mwcls.from_crawler(crawler)
elif hasattr(mwcls, 'from_settings'):
mw = mwcls.from_settings(settings)
else:
mw = mwcls()
middlewares.append(mw)
except NotConfigured as e:
if e.args:
clsname = clspath.split('.')[-1]
log.msg("Disabled %s: %s" % (clsname, e.args[0]), log.WARNING)
enabled = [x.__class__.__name__ for x in middlewares]
log.msg("Enabled %ss: %s" % (cls.component_name, ", ".join(enabled)), \
level=log.DEBUG)
return cls(*middlewares)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings, crawler)
def _add_middleware(self, mw):
if hasattr(mw, 'open_spider'):
self.methods['open_spider'].append(mw.open_spider)
if hasattr(mw, 'close_spider'):
self.methods['close_spider'].insert(0, mw.close_spider)
def _process_parallel(self, methodname, obj, *args):
return process_parallel(self.methods[methodname], obj, *args)
def _process_chain(self, methodname, obj, *args):
return process_chain(self.methods[methodname], obj, *args)
def _process_chain_both(self, cb_methodname, eb_methodname, obj, *args):
return process_chain_both(self.methods[cb_methodname], \
self.methods[eb_methodname], obj, *args)
def open_spider(self, spider):
return self._process_parallel('open_spider', spider)
def close_spider(self, spider):
return self._process_parallel('close_spider', spider)
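# Minimal sketch (not part of Scrapy itself) of a concrete manager built on the
# base class above; the MYPROJECT_MIDDLEWARES setting name is hypothetical.
class ExampleMiddlewareManager(MiddlewareManager):
    component_name = 'example middleware'

    @classmethod
    def _get_mwlist_from_settings(cls, settings):
        return settings.getlist('MYPROJECT_MIDDLEWARES')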
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# from tensorflow import Print
import seq2seq
import data_utils
np.set_printoptions(threshold=np.nan)
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
This class also allows to use GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
def __init__(self,
source_vocab_size,
target_vocab_size,
buckets,
size,
num_layers,
max_gradient_norm,
batch_size,
learning_rate,
learning_rate_decay_factor,
beam_size,
use_lstm=False,
num_samples=512,
forward_only=False,
dtype=tf.float32):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
dtype: the data type to use to store internal variables.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(
float(learning_rate), trainable=False, dtype=dtype)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
w_t = tf.get_variable("proj_w", [self.target_vocab_size, size], dtype=dtype)
w = tf.transpose(w_t)
b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
output_projection = (w, b)
def sampled_loss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
# We need to compute the sampled_softmax_loss using 32bit floats to
# avoid numerical instabilities.
local_w_t = tf.cast(w_t, tf.float32)
local_b = tf.cast(b, tf.float32)
local_inputs = tf.cast(inputs, tf.float32)
return tf.cast(
tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels,
num_samples, self.target_vocab_size),
dtype)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return seq2seq.embedding_attention_seq2seq(
encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
beam_size=beam_size,
output_projection=output_projection,
feed_previous=do_decode,
dtype=dtype)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(dtype, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses, self.symbols = seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
# if output_projection is not None:
# for b in xrange(len(buckets)):
# self.outputs[b] = [
# tf.matmul(output, output_projection[0]) + output_projection[1]
# for output in self.outputs[b]
# ]
else:
self.outputs, self.losses, self.symbols = seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# self.outputs = Print(self.outputs, self.outputs, message="i'm printing ")
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.all_variables(), max_to_keep=1000,
keep_checkpoint_every_n_hours=6)
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
#print('-----------------------------',decoder_size)
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for l in xrange(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in xrange(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
#print('=======================================',self.symbols)
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
# for l in xrange(decoder_size): # Output logits.
# output_feed.append(self.outputs[bucket_id][l])
#print('=======================================',self.symbols)
if self.symbols[0]:
#print('-------------aaaaaaaaaaaaaaaaaaa----------------')
for l in xrange(decoder_size): # Output symbols
output_feed.append(self.symbols[bucket_id][l])
else:
#print('----------------bbbbbbbbbbbbbbbb-------------')
for l in xrange(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...) later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
#print('--------------encoder_size:',encoder_size)
#print('--------------decoder_size:',decoder_size)
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
for _ in xrange(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
#print('---------------------encoder_input:',encoder_input)
#print('---------------------decoder_input:',decoder_input)
# Encoder inputs are padded and then reversed.
encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
#print('---------------------encoder_pad:',encoder_pad)
#print('---------------------encoder_input:',encoder_input)
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([data_utils.GO_ID] + decoder_input +
[data_utils.PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in xrange(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD symbol.
# The corresponding target is decoder_input shifted by 1 forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
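# Worked example of the padding in get_batch (toy values): for a bucket of
# (encoder_size, decoder_size) = (5, 10), an encoder input [4, 7, 9] becomes
# [PAD, PAD, 9, 7, 4] after padding and reversal, and a decoder input [12, 3]
# becomes [GO, 12, 3, PAD, ..., PAD] of length 10; the matching target weights
# are 1.0 where the next decoder token is real and 0.0 where it is PAD or the
# final position.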
|
import numpy as np
from multiagent.core import World, Agent, Landmark, Border
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 10
num_landmarks = 10
world.collaborative = False
# Control partial observability of the agents
self.vision_range = 3 # multiplier of agent size
self.land_vision_count = 4
self.agent_vision_count = 3 # include the self agent, that is +1
# add agents
world.agents = [Agent() for _ in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.15 / (num_agents / 6)
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.size = 0.05 / (num_landmarks / 6)
self.occ_land_dist = world.agents[0].size + world.landmarks[0].size
self.reset_world(world)
return world
def reset_world(self, world):
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.35, 0.85])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
dists = [np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos))) for l in world.landmarks]
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if a is not agent:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
info = {'success': [], 'collisions': [], 'rew': [], 'min_dists': [], 'occ_land': []}
info['collisions'].append(collisions)
info['occ_land'].append(occupied_landmarks)
info['rew'].append(rew)
info['min_dists'].append(min(dists))
return info
# return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return dist < dist_min
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
rew = 0
dists = [np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos))) for l in world.landmarks]
rew -= min(dists)
if not min(dists) < self.occ_land_dist:
rew -= 1
if agent.collide:
for a in world.agents:
if a is not agent:
if self.is_collision(a, agent):
rew -= 1
return rew
def observation(self, agent, world):
# get positions of all entities in this agent's reference frame
entity_pos = self.get_land_vision(agent, world)
other_pos = self.get_agent_vision(agent, world)
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos) # + comm
def get_agent_vision(self, agent, world):
dist = []
for other in world.agents:
dist.append(np.sqrt(np.sum(np.square(other.state.p_pos - agent.state.p_pos))))
sorted_dist = np.argsort(dist)
agent_idx = sorted_dist[1:self.agent_vision_count+1] # max. number of agents it can observe
agent_vision = []
for i, other in enumerate(world.agents):
if i in agent_idx:
agent_vision.append(other.state.p_pos - agent.state.p_pos)
return agent_vision
def get_land_vision(self, agent, world):
dist = []
for entity in world.landmarks:
dist.append(np.sqrt(np.sum(np.square(entity.state.p_pos - agent.state.p_pos))))
# Ascending sort, and retrieve the index of landmarks in that order
sorted_dist = np.argsort(dist)
land_idx = sorted_dist[0:self.land_vision_count] # max. number of landmarks that it can observe
# Check if these landmarks are in the vision range and populate observation
land_vision = []
for i, entity in enumerate(world.landmarks):
if i in land_idx:
land_vision.append(entity.state.p_pos - agent.state.p_pos)
return land_vision
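# Observation size note: with the settings in make_world (land_vision_count=4,
# agent_vision_count=3) and the default 2-D world, observation() returns a
# vector of length 2 (velocity) + 2 (position) + 4*2 (landmark offsets)
# + 3*2 (agent offsets) = 18.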
|
from datetime import datetime
import json
import logging
from kubernetes import client, config
from kubernetes.client import api_client
# Configs can be set in Configuration class directly or using helper utility
from rules import *
def main():
config.load_kube_config()
v1 = client.CoreV1Api()
print("Listing all pods in the cluster:")
RUNNING_STATUS = "Running"
rules = [image_prefix_check, team_label_present_check, recent_start_time_check]
pods = v1.list_pod_for_all_namespaces(watch=False)
for item in pods.items:
status = str(item.status.phase)
if status == RUNNING_STATUS:
pod_name = item.metadata.name
start_time = item.status.start_time
pod_result = PodResult(pod_name)
for rule in rules:
try:
rule_result = rule.check(item)
pod_result.add_rule_result(rule_result)
except Exception as e:
logging.error(e)
output = pod_result.to_json()
print(output)
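# Hypothetical sketch of what one of the imported rules could look like; the
# real `rules` module is not shown in this file. An instance of such a class
# fits the `rule.check(item)` call in main(). The registry prefix is an assumption.
class ExampleImagePrefixRule(object):
    name = 'image_prefix_check'
    allowed_prefix = 'registry.example.com/'

    def check(self, pod):
        # Pass only if every container image comes from the allowed registry.
        images = [c.image for c in pod.spec.containers]
        passed = all(image.startswith(self.allowed_prefix) for image in images)
        return {'rule': self.name, 'passed': passed, 'images': images}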
if __name__ == '__main__':
main()
|
import os
import dlib
import numpy as np
from deepface.confs.conf import DeepFaceConfs
from deepface.utils.bbox import BoundingBox
from .detector_base import FaceDetector
class FaceDetectorDlib(FaceDetector):
"""
reference : https://www.pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/
"""
NAME = 'detector_dlib'
def __init__(self):
super(FaceDetectorDlib, self).__init__()
self.detector = dlib.get_frontal_face_detector()
predictor_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
DeepFaceConfs.get()['detector']['dlib']['landmark_detector']
)
self.predictor = dlib.shape_predictor(predictor_path)
self.upsample_scale = DeepFaceConfs.get()['detector']['dlib']['scale']
def name(self):
return FaceDetectorDlib.NAME
def detect(self, npimg):
dets, scores, idx = self.detector.run(npimg, self.upsample_scale, -1)
faces = []
for det, score in zip(dets, scores):
if score < DeepFaceConfs.get()['detector']['dlib']['score_th']:
continue
x = max(det.left(), 0)
y = max(det.top(), 0)
w = min(det.right() - det.left(), npimg.shape[1] - x)
h = min(det.bottom() - det.top(), npimg.shape[0] - y)
if w <= 1 or h <= 1:
continue
bbox = BoundingBox(x, y, w, h, score)
# find landmark
bbox.face_landmark = self.detect_landmark(npimg, det)
faces.append(bbox)
faces = sorted(faces, key=lambda x: x.score, reverse=True)
return faces
def detect_landmark(self, npimg, det):
shape = self.predictor(npimg, det)
coords = np.zeros((68, 2), dtype=int)
# loop over the 68 facial landmarks and convert them
# to a 2-tuple of (x, y)-coordinates
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
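# Usage sketch (illustrative; the image path is made up and the exact attribute
# names on BoundingBox are assumed from its constructor arguments):
#
#     import cv2
#     detector = FaceDetectorDlib()
#     for face in detector.detect(cv2.imread('group_photo.jpg')):
#         print(face.score, face.face_landmark.shape)  # detection score, 68x2 landmarks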
|
# Generated by Django 3.2.3 on 2021-05-30 00:22
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_alter_student_phone_number'),
]
operations = [
migrations.AlterField(
model_name='student',
name='email',
field=models.CharField(max_length=255, null=True, validators=[django.core.validators.EmailValidator('Email is not valid')]),
),
]
|
import re
from pathlib import Path
from nameparser import HumanName
from clldutils.misc import slug
from pylexibank.providers import abvd
from pylexibank.util import progressbar
from pylexibank import FormSpec
def normalize_contributors(l):
for key in ['checkedby', 'typedby']:
l[key] = normalize_names(l[key])
return l
def normalize_names(names):
res = []
if names:
for name in re.split(r'\s+and\s+|\s*&\s*|,\s+|\s*\+\s*', names):
name = {
'Simon': 'Simon Greenhill',
'D. Mead': 'David Mead',
'Alex François': 'Alexandre François',
'Dr Alex François': 'Alexandre François',
'R. Blust': 'Robert Blust',
}.get(name, name)
name = HumanName(name.title())
res.append('{0} {1}'.format(name.first or name.title, name.last).strip())
return ' and '.join(res)
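# Example: normalize_names('Simon & D. Mead') splits on '&', expands both
# shorthand names via the mapping above, and returns
# 'Simon Greenhill and David Mead'.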
class Dataset(abvd.BVD):
dir = Path(__file__).parent
id = 'abvd'
SECTION = 'austronesian'
invalid_ids = [
261, # Duplicate West Futuna list
]
max_language_id = 2000
form_spec = FormSpec(
brackets={"[": "]", "{": "}", "(": ")"},
separators=";/,~",
missing_data=('-', ),
strip_inside_brackets=True,
)
def cmd_makecldf(self, args):
args.writer.add_sources(*self.etc_dir.read_bib())
concepts = args.writer.add_concepts(
id_factory=lambda c: c.id.split('-')[-1]+ '_' + slug(c.english),
lookup_factory=lambda c: c['ID'].split('_')[0]
)
for wl in progressbar(self.iter_wordlists(args.log), desc="cldfify"):
wl.to_cldf(args.writer, concepts)
# Now normalize the typedby and checkedby values:
args.writer.objects['LanguageTable'][-1] = normalize_contributors(args.writer.objects['LanguageTable'][-1])
|
from ..web import Links
from ..web.html import unescape
from . import Public, Secure
from . import WebEntity
from . import Post
from . import Image
__all__ = ['Thread']
class Thread (WebEntity):
"""
Represents a thread.
"""
default_object = {'posts':[]}
def __init__ (self, board, thread):
"""
Initializes an instance from a board and a thread number.
"""
self.board = board
self.thread = thread
def __repr__ (self):
"""
Returns a string representation fit for eval.
"""
return (
'{self.__class__.__name__}({})'.format (
', '.join(map(repr, (self.board, self.thread))),
self=self
)
)
@property
def apiurl (self):
"""
Returns an url to the corresponding API json page.
"""
return Links.createAPIURL (
'/{self.board}/thread/{self.thread}.json'.format(self=self)
)
@property
def url (self):
"""
Returns an url to the thread.
"""
return Links.createURL (
'/{self.board}/thread/{self.thread}'.format(self=self)
)
def process (self):
"""
Returns the Post instances you get by evaluating the thread.
"""
thread = self.download_and_decode()
posts = []
for post in thread['posts']:
post['trip'] = str(post.get('trip', ''))
pub_match = Public.pattern.match (post['trip'])
sec_match = Secure.pattern.search(post['trip'])
public = Public(pub_match.group(1)) if pub_match else None
secure = Secure(sec_match.group(1)) if sec_match else None
name = unescape(post.get('name', ''))
name = name.encode('utf8')
if 'tim' in post and 'ext' in post:
post['image'] = Image (
self.board,
post['tim'], post['ext'].encode('utf8'),
post['filename'].encode('utf8')
)
posts.append (
Post (
name = name,
time = post['time'],
board = self.board,
thread = self.thread,
post = post['no'],
public = public,
secure = secure,
image = post.get('image')
)
)
return posts
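# Usage sketch (the board name and thread number below are made up):
#
#     posts = Thread('g', 1234567).process()
#     for post in posts:
#         print(post)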
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for Ideal + Ideal Liquid (i.e. no activity coefficient) state block;
only tests for construction as parameters need to be provided or estimated
from VLE data to compute the activity coefficients.
Author: Jaffer Ghouse
"""
import pytest
from pyomo.environ import check_optimal_termination, ConcreteModel, value
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import (
BTXParameterBlock,
)
from idaes.core.util.model_statistics import (
degrees_of_freedom,
fixed_variables_set,
activated_constraints_set,
)
from idaes.core.solvers import get_solver
solver = get_solver()
# -----------------------------------------------------------------------------
class TestFTPz_LV_inlet:
@pytest.fixture(scope="class")
def model(self):
# Create a flowsheet for test
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties_ideal_vl = BTXParameterBlock(
default={
"valid_phase": ("Liq", "Vap"),
"activity_coeff_model": "Ideal",
"state_vars": "FTPz",
}
)
m.fs.state_block_ideal_vl = m.fs.properties_ideal_vl.build_state_block(
[0],
default={"defined_state": True}
)
m.fs.state_block_ideal_vl[0].flow_mol.fix(1)
m.fs.state_block_ideal_vl[0].temperature.fix(368)
m.fs.state_block_ideal_vl[0].pressure.fix(101325)
m.fs.state_block_ideal_vl[0].mole_frac_comp["benzene"].fix(0.5)
m.fs.state_block_ideal_vl[0].mole_frac_comp["toluene"].fix(0.5)
return m
@pytest.mark.unit
def test_build(self, model):
assert len(model.fs.properties_ideal_vl.config) == 4
assert model.fs.properties_ideal_vl.config.valid_phase == ("Liq", "Vap")
assert len(model.fs.properties_ideal_vl.phase_list) == 2
assert model.fs.properties_ideal_vl.phase_list == ["Liq", "Vap"]
assert model.fs.state_block_ideal_vl[0].config.defined_state
assert hasattr(model.fs.state_block_ideal_vl[0], "eq_phase_equilibrium")
assert not hasattr(model.fs.state_block_ideal_vl[0], "eq_activity_coeff")
assert not hasattr(model.fs.state_block_ideal_vl[0], "eq_mol_frac_out")
@pytest.mark.unit
def test_dof(self, model):
assert degrees_of_freedom(model.fs.state_block_ideal_vl[0]) == 0
@pytest.mark.component
def test_units_consistent(self, model):
assert_units_consistent(model)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, model):
orig_fixed_vars = fixed_variables_set(model)
orig_act_consts = activated_constraints_set(model)
model.fs.state_block_ideal_vl.initialize()
assert degrees_of_freedom(model) == 0
fin_fixed_vars = fixed_variables_set(model)
fin_act_consts = activated_constraints_set(model)
assert len(fin_act_consts) == len(orig_act_consts)
assert len(fin_fixed_vars) == len(orig_fixed_vars)
for c in fin_act_consts:
assert c in orig_act_consts
for v in fin_fixed_vars:
assert v in orig_fixed_vars
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert value(
model.fs.state_block_ideal_vl[0].mole_frac_phase_comp["Liq", "benzene"]
) == pytest.approx(0.4121, abs=1e-3)
assert value(
model.fs.state_block_ideal_vl[0].mole_frac_phase_comp["Vap", "benzene"]
) == pytest.approx(0.6339, abs=1e-3)
class TestFTPz_L_inlet:
@pytest.fixture(scope="class")
def model(self):
# Create a flowsheet for test
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties_ideal_l = BTXParameterBlock(
default={
"valid_phase": "Liq",
"activity_coeff_model": "Ideal",
"state_vars": "FTPz",
}
)
m.fs.state_block_ideal_l = m.fs.properties_ideal_l.build_state_block(
[0],
default={"has_phase_equilibrium": False, "defined_state": True}
)
m.fs.state_block_ideal_l[0].flow_mol.fix(1)
m.fs.state_block_ideal_l[0].temperature.fix(368)
m.fs.state_block_ideal_l[0].pressure.fix(101325)
m.fs.state_block_ideal_l[0].mole_frac_comp["benzene"].fix(0.5)
m.fs.state_block_ideal_l[0].mole_frac_comp["toluene"].fix(0.5)
return m
@pytest.mark.unit
def test_build(self, model):
assert len(model.fs.properties_ideal_l.config) == 4
assert model.fs.properties_ideal_l.config.valid_phase == "Liq"
assert len(model.fs.properties_ideal_l.phase_list) == 1
assert model.fs.properties_ideal_l.phase_list == ["Liq"]
assert model.fs.state_block_ideal_l[0].config.defined_state
assert not hasattr(model.fs.state_block_ideal_l[0], "eq_phase_equilibrium")
assert not hasattr(model.fs.state_block_ideal_l[0], "eq_activity_coeff")
assert not hasattr(model.fs.state_block_ideal_l[0], "eq_mol_frac_out")
@pytest.mark.unit
def test_dof(self, model):
assert degrees_of_freedom(model.fs.state_block_ideal_l[0]) == 0
@pytest.mark.component
def test_units_consistent(self, model):
assert_units_consistent(model)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, model):
orig_fixed_vars = fixed_variables_set(model)
orig_act_consts = activated_constraints_set(model)
model.fs.state_block_ideal_l.initialize()
assert degrees_of_freedom(model) == 0
fin_fixed_vars = fixed_variables_set(model)
fin_act_consts = activated_constraints_set(model)
assert len(fin_act_consts) == len(orig_act_consts)
assert len(fin_fixed_vars) == len(orig_fixed_vars)
for c in fin_act_consts:
assert c in orig_act_consts
for v in fin_fixed_vars:
assert v in orig_fixed_vars
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert value(
model.fs.state_block_ideal_l[0].mole_frac_phase_comp["Liq", "benzene"]
) == pytest.approx(0.5, abs=1e-3)
assert value(
model.fs.state_block_ideal_l[0].mole_frac_phase_comp["Liq", "toluene"]
) == pytest.approx(0.5, abs=1e-3)
class TestFTPz_V_inlet:
@pytest.fixture(scope="class")
def model(self):
# Create a flowsheet for test
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties_ideal_v = BTXParameterBlock(
default={
"valid_phase": "Vap",
"activity_coeff_model": "Ideal",
"state_vars": "FTPz",
}
)
m.fs.state_block_ideal_v = m.fs.properties_ideal_v.build_state_block(
[0],
default={"has_phase_equilibrium": False, "defined_state": True}
)
m.fs.state_block_ideal_v[0].flow_mol.fix(1)
m.fs.state_block_ideal_v[0].temperature.fix(368)
m.fs.state_block_ideal_v[0].pressure.fix(101325)
m.fs.state_block_ideal_v[0].mole_frac_comp["benzene"].fix(0.5)
m.fs.state_block_ideal_v[0].mole_frac_comp["toluene"].fix(0.5)
return m
@pytest.mark.unit
def test_build(self, model):
assert len(model.fs.properties_ideal_v.config) == 4
assert model.fs.properties_ideal_v.config.valid_phase == "Vap"
assert len(model.fs.properties_ideal_v.phase_list) == 1
assert model.fs.properties_ideal_v.phase_list == ["Vap"]
assert model.fs.state_block_ideal_v[0].config.defined_state
assert not hasattr(model.fs.state_block_ideal_v[0], "eq_phase_equilibrium")
assert not hasattr(model.fs.state_block_ideal_v[0], "eq_activity_coeff")
assert not hasattr(model.fs.state_block_ideal_v[0], "eq_mol_frac_out")
@pytest.mark.unit
def test_dof(self, model):
assert degrees_of_freedom(model.fs.state_block_ideal_v[0]) == 0
@pytest.mark.component
def test_units_consistent(self, model):
assert_units_consistent(model)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, model):
orig_fixed_vars = fixed_variables_set(model)
orig_act_consts = activated_constraints_set(model)
model.fs.state_block_ideal_v.initialize()
assert degrees_of_freedom(model) == 0
fin_fixed_vars = fixed_variables_set(model)
fin_act_consts = activated_constraints_set(model)
assert len(fin_act_consts) == len(orig_act_consts)
assert len(fin_fixed_vars) == len(orig_fixed_vars)
for c in fin_act_consts:
assert c in orig_act_consts
for v in fin_fixed_vars:
assert v in orig_fixed_vars
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert value(
model.fs.state_block_ideal_v[0].mole_frac_phase_comp["Vap", "benzene"]
) == pytest.approx(0.5, abs=1e-3)
assert value(
model.fs.state_block_ideal_v[0].mole_frac_phase_comp["Vap", "toluene"]
) == pytest.approx(0.5, abs=1e-3)
class TestFTPz_LV_outlet:
@pytest.fixture(scope="class")
def model(self):
# Create a flowsheet for test
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties_ideal_vl = BTXParameterBlock(
default={
"valid_phase": ("Liq", "Vap"),
"activity_coeff_model": "Ideal",
"state_vars": "FTPz",
}
)
m.fs.state_block_ideal_vl = m.fs.properties_ideal_vl.build_state_block(
[0],
default={"defined_state": False}
)
m.fs.state_block_ideal_vl[0].flow_mol.fix(1)
m.fs.state_block_ideal_vl[0].temperature.fix(368)
m.fs.state_block_ideal_vl[0].pressure.fix(101325)
m.fs.state_block_ideal_vl[0].mole_frac_comp["benzene"].fix(0.5)
return m
@pytest.mark.unit
def test_build(self, model):
assert len(model.fs.properties_ideal_vl.config) == 4
assert model.fs.properties_ideal_vl.config.valid_phase == ("Liq", "Vap")
assert len(model.fs.properties_ideal_vl.phase_list) == 2
assert model.fs.properties_ideal_vl.phase_list == ["Liq", "Vap"]
assert not model.fs.state_block_ideal_vl[0].config.defined_state
assert hasattr(model.fs.state_block_ideal_vl[0], "eq_phase_equilibrium")
assert not hasattr(model.fs.state_block_ideal_vl[0], "eq_activity_coeff")
assert hasattr(model.fs.state_block_ideal_vl[0], "eq_mol_frac_out")
@pytest.mark.unit
def test_dof(self, model):
assert degrees_of_freedom(model.fs.state_block_ideal_vl[0]) == 0
@pytest.mark.component
def test_units_consistent(self, model):
assert_units_consistent(model)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, model):
orig_fixed_vars = fixed_variables_set(model)
orig_act_consts = activated_constraints_set(model)
model.fs.state_block_ideal_vl.initialize()
assert degrees_of_freedom(model) == 0
fin_fixed_vars = fixed_variables_set(model)
fin_act_consts = activated_constraints_set(model)
assert len(fin_act_consts) == len(orig_act_consts)
assert len(fin_fixed_vars) == len(orig_fixed_vars)
for c in fin_act_consts:
assert c in orig_act_consts
for v in fin_fixed_vars:
assert v in orig_fixed_vars
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert value(
model.fs.state_block_ideal_vl[0].mole_frac_phase_comp["Liq", "benzene"]
) == pytest.approx(0.4121, abs=1e-3)
assert value(
model.fs.state_block_ideal_vl[0].mole_frac_phase_comp["Vap", "benzene"]
) == pytest.approx(0.6339, abs=1e-3)
class TestFTPz_L_outlet:
@pytest.fixture(scope="class")
def model(self):
# Create a flowsheet for test
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties_ideal_l = BTXParameterBlock(
default={
"valid_phase": "Liq",
"activity_coeff_model": "Ideal",
"state_vars": "FTPz",
}
)
m.fs.state_block_ideal_l = m.fs.properties_ideal_l.build_state_block(
[0],
default={"has_phase_equilibrium": False, "defined_state": False}
)
m.fs.state_block_ideal_l[0].flow_mol.fix(1)
m.fs.state_block_ideal_l[0].temperature.fix(368)
m.fs.state_block_ideal_l[0].pressure.fix(101325)
m.fs.state_block_ideal_l[0].mole_frac_comp["benzene"].fix(0.5)
return m
@pytest.mark.unit
def test_build(self, model):
assert len(model.fs.properties_ideal_l.config) == 4
assert model.fs.properties_ideal_l.config.valid_phase == "Liq"
assert len(model.fs.properties_ideal_l.phase_list) == 1
assert model.fs.properties_ideal_l.phase_list == ["Liq"]
assert not model.fs.state_block_ideal_l[0].config.defined_state
assert not hasattr(model.fs.state_block_ideal_l[0], "eq_phase_equilibrium")
assert not hasattr(model.fs.state_block_ideal_l[0], "eq_activity_coeff")
assert hasattr(model.fs.state_block_ideal_l[0], "eq_mol_frac_out")
@pytest.mark.unit
def test_dof(self, model):
assert degrees_of_freedom(model.fs.state_block_ideal_l[0]) == 0
@pytest.mark.component
def test_units_consistent(self, model):
assert_units_consistent(model)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, model):
orig_fixed_vars = fixed_variables_set(model)
orig_act_consts = activated_constraints_set(model)
model.fs.state_block_ideal_l.initialize()
assert degrees_of_freedom(model) == 0
fin_fixed_vars = fixed_variables_set(model)
fin_act_consts = activated_constraints_set(model)
assert len(fin_act_consts) == len(orig_act_consts)
assert len(fin_fixed_vars) == len(orig_fixed_vars)
for c in fin_act_consts:
assert c in orig_act_consts
for v in fin_fixed_vars:
assert v in orig_fixed_vars
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert value(
model.fs.state_block_ideal_l[0].mole_frac_phase_comp["Liq", "benzene"]
) == pytest.approx(0.5, abs=1e-3)
assert value(
model.fs.state_block_ideal_l[0].mole_frac_phase_comp["Liq", "toluene"]
) == pytest.approx(0.5, abs=1e-3)
class TestFTPz_V_outlet:
@pytest.fixture(scope="class")
def model(self):
# Create a flowsheet for test
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties_ideal_v = BTXParameterBlock(
default={
"valid_phase": "Vap",
"activity_coeff_model": "Ideal",
"state_vars": "FTPz",
}
)
m.fs.state_block_ideal_v = m.fs.properties_ideal_v.build_state_block(
[0],
default={"has_phase_equilibrium": False, "defined_state": False}
)
m.fs.state_block_ideal_v[0].flow_mol.fix(1)
m.fs.state_block_ideal_v[0].temperature.fix(368)
m.fs.state_block_ideal_v[0].pressure.fix(101325)
m.fs.state_block_ideal_v[0].mole_frac_comp["benzene"].fix(0.5)
return m
@pytest.mark.unit
def test_build(self, model):
assert len(model.fs.properties_ideal_v.config) == 4
assert model.fs.properties_ideal_v.config.valid_phase == "Vap"
assert len(model.fs.properties_ideal_v.phase_list) == 1
assert model.fs.properties_ideal_v.phase_list == ["Vap"]
assert not model.fs.state_block_ideal_v[0].config.defined_state
assert not hasattr(model.fs.state_block_ideal_v[0], "eq_phase_equilibrium")
assert not hasattr(model.fs.state_block_ideal_v[0], "eq_activity_coeff")
assert hasattr(model.fs.state_block_ideal_v[0], "eq_mol_frac_out")
@pytest.mark.unit
def test_dof(self, model):
assert degrees_of_freedom(model.fs.state_block_ideal_v[0]) == 0
@pytest.mark.component
def test_units_consistent(self, model):
assert_units_consistent(model)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, model):
orig_fixed_vars = fixed_variables_set(model)
orig_act_consts = activated_constraints_set(model)
model.fs.state_block_ideal_v.initialize()
assert degrees_of_freedom(model) == 0
fin_fixed_vars = fixed_variables_set(model)
fin_act_consts = activated_constraints_set(model)
assert len(fin_act_consts) == len(orig_act_consts)
assert len(fin_fixed_vars) == len(orig_fixed_vars)
for c in fin_act_consts:
assert c in orig_act_consts
for v in fin_fixed_vars:
assert v in orig_fixed_vars
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert value(
model.fs.state_block_ideal_v[0].mole_frac_phase_comp["Vap", "benzene"]
) == pytest.approx(0.5, abs=1e-3)
assert value(
model.fs.state_block_ideal_v[0].mole_frac_phase_comp["Vap", "toluene"]
) == pytest.approx(0.5, abs=1e-3)
|
from math import sin, radians, cos, tan
ang = float(input('Enter an angle in degrees: '))
sen = sin(radians(ang))
cose = cos(radians(ang))
tang = tan(radians(ang))
print('For the angle of {}, the sine is {:.2f}, the cosine is {:.2f} and the tangent is {:.2f}'.format(ang, sen, cose, tang))
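# Illustrative sample run (assuming an input of 30 degrees; values rounded to two decimals):
#   Enter an angle in degrees: 30
#   For the angle of 30.0, the sine is 0.50, the cosine is 0.87 and the tangent is 0.58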
|
import functools
import logging
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
from pip._vendor.resolvelib import Resolver as RLResolver
from pip._internal.exceptions import InstallationError
from pip._internal.req.req_install import check_invalid_constraint_type
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.resolution.resolvelib.provider import PipProvider
from pip._internal.utils.misc import dist_is_editable
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .factory import Factory
if MYPY_CHECK_RUNNING:
from typing import Dict, List, Optional, Set, Tuple
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.resolvelib.resolvers import Result
from pip._vendor.resolvelib.structs import Graph
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
logger = logging.getLogger(__name__)
class Resolver(BaseResolver):
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
wheel_cache, # type: Optional[WheelCache]
make_install_req, # type: InstallRequirementProvider
use_user_site, # type: bool
ignore_dependencies, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
force_reinstall, # type: bool
upgrade_strategy, # type: str
py_version_info=None, # type: Optional[Tuple[int, ...]]
):
super(Resolver, self).__init__()
assert upgrade_strategy in self._allowed_strategies
self.factory = Factory(
finder=finder,
preparer=preparer,
make_install_req=make_install_req,
wheel_cache=wheel_cache,
use_user_site=use_user_site,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
py_version_info=py_version_info,
)
self.ignore_dependencies = ignore_dependencies
self.upgrade_strategy = upgrade_strategy
self._result = None # type: Optional[Result]
def resolve(self, root_reqs, check_supported_wheels):
# type: (List[InstallRequirement], bool) -> RequirementSet
constraints = {} # type: Dict[str, SpecifierSet]
user_requested = set() # type: Set[str]
requirements = []
for req in root_reqs:
if req.constraint:
# Ensure we only accept valid constraints
problem = check_invalid_constraint_type(req)
if problem:
raise InstallationError(problem)
if not req.match_markers():
continue
name = canonicalize_name(req.name)
if name in constraints:
constraints[name] = constraints[name] & req.specifier
else:
constraints[name] = req.specifier
else:
if req.user_supplied and req.name:
user_requested.add(canonicalize_name(req.name))
r = self.factory.make_requirement_from_install_req(
req, requested_extras=(),
)
if r is not None:
requirements.append(r)
provider = PipProvider(
factory=self.factory,
constraints=constraints,
ignore_dependencies=self.ignore_dependencies,
upgrade_strategy=self.upgrade_strategy,
user_requested=user_requested,
)
reporter = BaseReporter()
resolver = RLResolver(provider, reporter)
try:
try_to_avoid_resolution_too_deep = 2000000
self._result = resolver.resolve(
requirements, max_rounds=try_to_avoid_resolution_too_deep,
)
except ResolutionImpossible as e:
error = self.factory.get_installation_error(e)
six.raise_from(error, e)
req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
for candidate in self._result.mapping.values():
ireq = candidate.get_install_requirement()
if ireq is None:
continue
# Check if there is already an installation under the same name,
# and set a flag for later stages to uninstall it, if needed.
# * There isn't, good -- no uninstallation needed.
# * The --force-reinstall flag is set. Always reinstall.
# * The installation is different in version or editable-ness, so
# we need to uninstall it to install the new distribution.
# * The installed version is the same as the pending distribution.
#   Skip this distribution altogether to save work.
installed_dist = self.factory.get_dist_to_uninstall(candidate)
if installed_dist is None:
ireq.should_reinstall = False
elif self.factory.force_reinstall:
ireq.should_reinstall = True
elif installed_dist.parsed_version != candidate.version:
ireq.should_reinstall = True
elif dist_is_editable(installed_dist) != candidate.is_editable:
ireq.should_reinstall = True
else:
continue
link = candidate.source_link
if link and link.is_yanked:
# The reason can contain non-ASCII characters, Unicode
# is required for Python 2.
msg = (
u'The candidate selected for download or install is a '
u'yanked version: {name!r} candidate (version {version} '
u'at {link})\nReason for being yanked: {reason}'
).format(
name=candidate.name,
version=candidate.version,
link=link,
reason=link.yanked_reason or u'<none given>',
)
logger.warning(msg)
req_set.add_named_requirement(ireq)
reqs = req_set.all_requirements
self.factory.preparer.prepare_linked_requirements_more(reqs)
return req_set
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[InstallRequirement]
"""Get order for installation of requirements in RequirementSet.
The returned list contains a requirement before another that depends on
it. This helps ensure that the environment is kept consistent as they
get installed one-by-one.
The current implementation creates a topological ordering of the
dependency graph, while breaking any cycles in the graph at arbitrary
points. We make no guarantees about where a cycle will be broken,
other than that it will be broken.
"""
assert self._result is not None, "must call resolve() first"
graph = self._result.graph
weights = get_topological_weights(graph)
sorted_items = sorted(
req_set.requirements.items(),
key=functools.partial(_req_set_item_sorter, weights=weights),
reverse=True,
)
return [ireq for _, ireq in sorted_items]
def get_topological_weights(graph):
# type: (Graph) -> Dict[Optional[str], int]
"""Assign weights to each node based on how "deep" they are.
This implementation may change at any point in the future without prior
notice.
We take the length of the longest path to any node from the root, ignoring any
paths that contain a single node twice (i.e. cycles). This is done through
a depth-first search of the graph, while keeping track of the path to
the node.
A cycle in the graph would result in a node being revisited while it is already
on the current path. In that case we take no action, which ensures we
don't get stuck in a cycle.
When assigning weight, the longer path (i.e. larger length) is preferred.
"""
path = set() # type: Set[Optional[str]]
weights = {} # type: Dict[Optional[str], int]
def visit(node):
# type: (Optional[str]) -> None
if node in path:
# We hit a cycle, so we'll break it here.
return
# Time to visit the children!
path.add(node)
for child in graph.iter_children(node):
visit(child)
path.remove(node)
last_known_parent_count = weights.get(node, 0)
weights[node] = max(last_known_parent_count, len(path))
# `None` is guaranteed to be the root node by resolvelib.
visit(None)
# Sanity checks
assert weights[None] == 0
assert len(weights) == len(graph)
return weights
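# A minimal sketch of the weighting on a hypothetical dependency graph (names are
# illustrative, not real packages): with edges None -> "app" -> "lib" -> "core" and an
# extra edge None -> "core", the depth-first search reaches "core" twice but keeps the
# longer path, yielding weights of {None: 0, "app": 1, "lib": 2, "core": 3}.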
def _req_set_item_sorter(
item, # type: Tuple[str, InstallRequirement]
weights, # type: Dict[Optional[str], int]
):
# type: (...) -> Tuple[int, str]
"""Key function used to sort install requirements for installation.
Based on the "weight" mapping calculated in ``get_installation_order()``.
The canonical package name is returned as the second member as a tie-
breaker to ensure the result is predictable, which is useful in tests.
"""
name = canonicalize_name(item[0])
return weights[name], name
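# Hedged usage sketch: with the hypothetical weights above, sorting requirement items
# by this key with reverse=True (as get_installation_order() does) yields
# ["core", "lib", "app"], so the deepest dependency is installed first and the
# canonical name acts as a deterministic tie-breaker.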
|
class AbsAlgorithm(object):
def output(self):
pass
|
"""Top-level package for brawl_stars_agent."""
__author__ = """Research 2 use"""
__email__ = 'research2use@hotmail.com'
__version__ = '0.1.0'
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
record of files
naming for same name files: file.gif, file-1.gif, file-2.gif etc
"""
from __future__ import unicode_literals
import base64
import hashlib
import imghdr
import io
import json
import mimetypes
import os
import re
import shutil
import zipfile
import requests
import requests.exceptions
from PIL import Image, ImageFile, ImageOps
from six import PY2, StringIO, string_types, text_type
from six.moves.urllib.parse import quote, unquote
import frappe
from frappe import _, conf
from frappe.model.document import Document
from frappe.utils import call_hook_method, cint, cstr, encode, get_files_path, get_hook_method, random_string, strip
class MaxFileSizeReachedError(frappe.ValidationError):
pass
class FolderNotEmpty(frappe.ValidationError):
pass
exclude_from_linked_with = True
ImageFile.LOAD_TRUNCATED_IMAGES = True
class File(Document):
no_feed_on_delete = True
def before_insert(self):
frappe.local.rollback_observers.append(self)
self.set_folder_name()
if self.file_name:
self.file_name = re.sub(r'/', '', self.file_name)
self.content = self.get("content", None)
self.decode = self.get("decode", False)
if self.content:
self.save_file(content=self.content, decode=self.decode)
def get_name_based_on_parent_folder(self):
if self.folder:
return "/".join([self.folder, self.file_name])
def autoname(self):
"""Set name for folder"""
if self.is_folder:
if self.folder:
self.name = self.get_name_based_on_parent_folder()
else:
# home
self.name = self.file_name
else:
self.name = frappe.generate_hash("", 10)
def after_insert(self):
if not self.is_folder:
self.add_comment_in_reference_doc('Attachment',
_('Added {0}').format("<a href='{file_url}' target='_blank'>{file_name}</a>{icon}".format(**{
"icon": ' <i class="uil uil-lock-alt text-info"></i>' if self.is_private else "",
"file_url": quote(self.file_url) if self.file_url else self.file_name,
"file_name": self.file_name or self.file_url
})))
def after_rename(self, olddn, newdn, merge=False):
for successor in self.get_successor():
setup_folder_path(successor[0], self.name)
def get_successor(self):
return frappe.db.get_values(doctype='File',
filters={'folder': self.name},
fieldname='name')
def validate(self):
if self.is_new():
self.set_is_private()
self.set_file_name()
self.validate_duplicate_entry()
self.validate_folder()
if not self.file_url and not self.flags.ignore_file_validate:
if not self.is_folder:
self.validate_file()
self.generate_content_hash()
if frappe.db.exists('File', {'name': self.name, 'is_folder': 0}):
old_file_url = self.file_url
if not self.is_folder and (self.is_private != self.db_get('is_private')):
private_files = frappe.get_site_path('private', 'files')
public_files = frappe.get_site_path('public', 'files')
file_name = self.file_url.split('/')[-1]
if not self.is_private:
shutil.move(os.path.join(private_files, file_name),
os.path.join(public_files, file_name))
self.file_url = "/files/{0}".format(file_name)
else:
shutil.move(os.path.join(public_files, file_name),
os.path.join(private_files, file_name))
self.file_url = "/private/files/{0}".format(file_name)
update_existing_file_docs(self)
# update documents image url with new file url
if self.attached_to_doctype and self.attached_to_name:
if not self.attached_to_field:
field_name = None
reference_dict = frappe.get_doc(self.attached_to_doctype, self.attached_to_name).as_dict()
for key, value in reference_dict.items():
if value == old_file_url:
field_name = key
break
self.attached_to_field = field_name
if self.attached_to_field:
frappe.db.set_value(self.attached_to_doctype, self.attached_to_name,
self.attached_to_field, self.file_url)
self.validate_url()
if self.file_url and (self.is_private != self.file_url.startswith('/private')):
frappe.throw(_('Invalid file URL. Please contact System Administrator.'))
def set_folder_name(self):
"""Make parent folders if not exists based on reference doctype and name"""
if self.attached_to_doctype and not self.folder:
self.folder = frappe.db.get_value("File", {"is_attachments_folder": 1})
def validate_folder(self):
if not self.is_home_folder and not self.folder and \
not self.flags.ignore_folder_validate:
self.folder = "Home"
def validate_file(self):
"""Validates existence of public file
TODO: validate for private file
"""
full_path = self.get_full_path()
if full_path.startswith('http'):
return True
if not os.path.exists(full_path):
frappe.throw(_("File {0} does not exist").format(self.file_url), IOError)
def validate_duplicate_entry(self):
if not self.flags.ignore_duplicate_entry_error and not self.is_folder:
if not self.content_hash:
self.generate_content_hash()
# check duplicate name
# check duplicate assignment
filters = {
'content_hash': self.content_hash,
'is_private': self.is_private,
'name': ('!=', self.name)
}
if self.attached_to_doctype and self.attached_to_name:
filters.update({
'attached_to_doctype': self.attached_to_doctype,
'attached_to_name': self.attached_to_name
})
duplicate_file = frappe.db.get_value('File', filters, ['name', 'file_url'], as_dict=1)
if duplicate_file:
duplicate_file_doc = frappe.get_cached_doc('File', duplicate_file.name)
if duplicate_file_doc.exists_on_disk():
# just use the url, to avoid uploading a duplicate
self.file_url = duplicate_file.file_url
def set_file_name(self):
if not self.file_name and self.file_url:
self.file_name = self.file_url.split('/')[-1]
else:
self.file_name = re.sub(r'/', '', self.file_name)
def generate_content_hash(self):
if self.content_hash or not self.file_url or self.file_url.startswith('http'):
return
file_name = self.file_url.split('/')[-1]
try:
with open(get_files_path(file_name, is_private=self.is_private), "rb") as f:
self.content_hash = get_content_hash(f.read())
except IOError:
frappe.msgprint(_("File {0} does not exist").format(self.file_url))
raise
def on_trash(self):
if self.is_home_folder or self.is_attachments_folder:
frappe.throw(_("Cannot delete Home and Attachments folders"))
self.check_folder_is_empty()
self.call_delete_file()
if not self.is_folder:
self.add_comment_in_reference_doc('Attachment Removed', _("Removed {0}").format(self.file_name))
def make_thumbnail(self, set_as_thumbnail=True, width=300, height=300, suffix="small", crop=False):
if self.file_url:
if self.file_url.startswith("/files"):
try:
image, filename, extn = get_local_image(self.file_url)
except IOError:
return
else:
try:
image, filename, extn = get_web_image(self.file_url)
except (requests.exceptions.HTTPError, requests.exceptions.SSLError, IOError, TypeError):
return
size = width, height
if crop:
image = ImageOps.fit(image, size, Image.ANTIALIAS)
else:
image.thumbnail(size, Image.ANTIALIAS)
thumbnail_url = filename + "_" + suffix + "." + extn
path = os.path.abspath(frappe.get_site_path("public", thumbnail_url.lstrip("/")))
try:
image.save(path)
if set_as_thumbnail:
self.db_set("thumbnail_url", thumbnail_url)
self.db_set("thumbnail_url", thumbnail_url)
except IOError:
frappe.msgprint(_("Unable to write file format for {0}").format(path))
return
return thumbnail_url
def check_folder_is_empty(self):
"""Throw exception if folder is not empty"""
files = frappe.get_all("File", filters={"folder": self.name}, fields=("name", "file_name"))
if self.is_folder and files:
frappe.throw(_("Folder {0} is not empty").format(self.name), FolderNotEmpty)
def call_delete_file(self):
"""If file not attached to any other record, delete it"""
if self.file_name and self.content_hash and (not frappe.db.count("File",
{"content_hash": self.content_hash, "name": ["!=", self.name]})):
self.delete_file_data_content()
elif self.file_url:
self.delete_file_data_content(only_thumbnail=True)
def on_rollback(self):
self.flags.on_rollback = True
self.on_trash()
def unzip(self):
'''Unzip current file and replace it by its children'''
if not ".zip" in self.file_name:
frappe.msgprint(_("Not a zip file"))
return
zip_path = frappe.get_site_path(self.file_url.strip('/'))
base_url = os.path.dirname(self.file_url)
files = []
with zipfile.ZipFile(zip_path) as z:
for file in z.filelist:
if file.is_dir() or file.filename.startswith('__MACOSX/'):
# skip directories and macos hidden directory
continue
filename = os.path.basename(file.filename)
if filename.startswith('.'):
# skip hidden files
continue
file_doc = frappe.new_doc('File')
file_doc.content = z.read(file.filename)
file_doc.file_name = filename
file_doc.folder = self.folder
file_doc.is_private = self.is_private
file_doc.attached_to_doctype = self.attached_to_doctype
file_doc.attached_to_name = self.attached_to_name
file_doc.save()
files.append(file_doc)
frappe.delete_doc('File', self.name)
return files
def get_file_url(self):
data = frappe.db.get_value("File", self.file_data_name, ["file_name", "file_url"], as_dict=True)
return data.file_url or data.file_name
def exists_on_disk(self):
exists = os.path.exists(self.get_full_path())
return exists
def get_content(self):
"""Returns [`file_name`, `content`] for given file name `fname`"""
if self.get('content'):
return self.content
file_path = self.get_full_path()
# read the file
if PY2:
with open(encode(file_path)) as f:
content = f.read()
else:
with io.open(encode(file_path), mode='rb') as f:
content = f.read()
try:
# for plain text files
content = content.decode()
except UnicodeDecodeError:
# for .png, .jpg, etc
pass
return content
def get_full_path(self):
"""Returns file path from given file name"""
file_path = self.file_url or self.file_name
if "/" not in file_path:
file_path = "/files/" + file_path
if file_path.startswith("/private/files/"):
file_path = get_files_path(*file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
elif file_path.startswith("/files/"):
file_path = get_files_path(*file_path.split("/files/", 1)[1].split("/"))
elif file_path.startswith("http"):
pass
elif not self.file_url:
frappe.throw(_("There is some problem with the file url: {0}").format(file_path))
return file_path
def write_file(self):
"""write file to disk with a random name (to compare)"""
file_path = get_files_path(is_private=self.is_private)
if os.path.sep in self.file_name:
frappe.throw(_('File name cannot have {0}').format(os.path.sep))
# create directory (if not exists)
frappe.create_folder(file_path)
# write the file
self.content = self.get_content()
if isinstance(self.content, text_type):
self.content = self.content.encode()
with open(os.path.join(file_path.encode('utf-8'), self.file_name.encode('utf-8')), 'wb+') as f:
f.write(self.content)
return get_files_path(self.file_name, is_private=self.is_private)
def get_file_doc(self):
'''returns File object (Document) from given parameters or form_dict'''
r = frappe.form_dict
if self.file_url is None: self.file_url = r.file_url
if self.file_name is None: self.file_name = r.file_name
if self.attached_to_doctype is None: self.attached_to_doctype = r.doctype
if self.attached_to_name is None: self.attached_to_name = r.docname
if self.attached_to_field is None: self.attached_to_field = r.docfield
if self.folder is None: self.folder = r.folder
if self.is_private is None: self.is_private = r.is_private
if r.filedata:
file_doc = self.save_uploaded()
elif r.file_url:
file_doc = self.save()
return file_doc
def save_uploaded(self):
self.content = self.get_uploaded_content()
if self.content:
return self.save()
else:
raise Exception
def validate_url(self, df=None):
if self.file_url:
if not self.file_url.startswith(("http://", "https://", "/files/", "/private/files/")):
frappe.throw(_("URL must start with 'http://' or 'https://'"))
return
if not self.file_url.startswith(("http://", "https://")):
# local file
root_files_path = get_files_path(is_private=self.is_private)
if not os.path.commonpath([root_files_path]) == os.path.commonpath([root_files_path, self.get_full_path()]):
# basically the file url is skewed to not point to /files/ or /private/files
frappe.throw(_("{0} is not a valid file url").format(self.file_url))
self.file_url = unquote(self.file_url)
self.file_size = frappe.form_dict.file_size or self.file_size
def get_uploaded_content(self):
# should not be unicode when reading a file, hence using frappe.form
if 'filedata' in frappe.form_dict:
if "," in frappe.form_dict.filedata:
frappe.form_dict.filedata = frappe.form_dict.filedata.rsplit(",", 1)[1]
frappe.uploaded_content = base64.b64decode(frappe.form_dict.filedata)
return frappe.uploaded_content
elif self.content:
return self.content
frappe.msgprint(_('No file attached'))
return None
def save_file(self, content=None, decode=False, ignore_existing_file_check=False):
file_exists = False
self.content = content
if decode:
if isinstance(content, text_type):
self.content = content.encode("utf-8")
if b"," in self.content:
self.content = self.content.split(b",")[1]
self.content = base64.b64decode(self.content)
if not self.is_private:
self.is_private = 0
self.file_size = self.check_max_file_size()
self.content_hash = get_content_hash(self.content)
self.content_type = mimetypes.guess_type(self.file_name)[0]
duplicate_file = None
# check if a file exists with the same content hash and is also in the same folder (public or private)
if not ignore_existing_file_check:
duplicate_file = frappe.get_value("File", {
"content_hash": self.content_hash,
"is_private": self.is_private
},
["file_url", "name"], as_dict=True)
if duplicate_file:
file_doc = frappe.get_cached_doc('File', duplicate_file.name)
if file_doc.exists_on_disk():
self.file_url = duplicate_file.file_url
file_exists = True
if os.path.exists(encode(get_files_path(self.file_name, is_private=self.is_private))):
self.file_name = get_file_name(self.file_name, self.content_hash[-6:])
if not file_exists:
call_hook_method("before_write_file", file_size=self.file_size)
write_file_method = get_hook_method('write_file')
if write_file_method:
return write_file_method(self)
return self.save_file_on_filesystem()
def save_file_on_filesystem(self):
fpath = self.write_file()
if self.is_private:
self.file_url = "/private/files/{0}".format(self.file_name)
else:
self.file_url = "/files/{0}".format(self.file_name)
return {
'file_name': os.path.basename(fpath),
'file_url': self.file_url
}
def get_file_data_from_hash(self):
for name in frappe.db.sql_list("select name from `tabFile` where content_hash=%s and is_private=%s",
(self.content_hash, self.is_private)):
b = frappe.get_doc('File', name)
return {k: b.get(k) for k in frappe.get_hooks()['write_file_keys']}
return False
def check_max_file_size(self):
max_file_size = get_max_file_size()
file_size = len(self.content)
if file_size > max_file_size:
frappe.msgprint(_("File size exceeded the maximum allowed size of {0} MB").format(
max_file_size / 1048576),
raise_exception=MaxFileSizeReachedError)
return file_size
def delete_file_data_content(self, only_thumbnail=False):
method = get_hook_method('delete_file_data_content')
if method:
method(self, only_thumbnail=only_thumbnail)
else:
self.delete_file_from_filesystem(only_thumbnail=only_thumbnail)
def delete_file_from_filesystem(self, only_thumbnail=False):
"""Delete file, thumbnail from File document"""
if only_thumbnail:
delete_file(self.thumbnail_url)
else:
delete_file(self.file_url)
delete_file(self.thumbnail_url)
def is_downloadable(self):
return has_permission(self, 'read')
def get_extension(self):
'''returns split filename and extension'''
return os.path.splitext(self.file_name)
def add_comment_in_reference_doc(self, comment_type, text):
if self.attached_to_doctype and self.attached_to_name:
try:
doc = frappe.get_doc(self.attached_to_doctype, self.attached_to_name)
doc.add_comment(comment_type, text)
except frappe.DoesNotExistError:
frappe.clear_messages()
def set_is_private(self):
if self.file_url:
self.is_private = cint(self.file_url.startswith('/private'))
def on_doctype_update():
frappe.db.add_index("File", ["attached_to_doctype", "attached_to_name"])
def make_home_folder():
home = frappe.get_doc({
"doctype": "File",
"is_folder": 1,
"is_home_folder": 1,
"file_name": _("Home")
}).insert()
frappe.get_doc({
"doctype": "File",
"folder": home.name,
"is_folder": 1,
"is_attachments_folder": 1,
"file_name": _("Attachments")
}).insert()
@frappe.whitelist()
def create_new_folder(file_name, folder):
""" create new folder under current parent folder """
file = frappe.new_doc("File")
file.file_name = file_name
file.is_folder = 1
file.folder = folder
file.insert()
@frappe.whitelist()
def move_file(file_list, new_parent, old_parent):
if isinstance(file_list, string_types):
file_list = json.loads(file_list)
for file_obj in file_list:
setup_folder_path(file_obj.get("name"), new_parent)
# recalculate sizes
frappe.get_doc("File", old_parent).save()
frappe.get_doc("File", new_parent).save()
def setup_folder_path(filename, new_parent):
file = frappe.get_doc("File", filename)
file.folder = new_parent
file.save()
if file.is_folder:
from frappe.model.rename_doc import rename_doc
rename_doc("File", file.name, file.get_name_based_on_parent_folder(), ignore_permissions=True)
def get_extension(filename, extn, content):
mimetype = None
if extn:
# remove '?' char and parameters from extn if present
if '?' in extn:
extn = extn.split('?', 1)[0]
mimetype = mimetypes.guess_type(filename + "." + extn)[0]
if mimetype is None or not mimetype.startswith("image/") and content:
# detect file extension by reading image header properties
extn = imghdr.what(filename + "." + (extn or ""), h=content)
return extn
def get_local_image(file_url):
file_path = frappe.get_site_path("public", file_url.lstrip("/"))
try:
image = Image.open(file_path)
except IOError:
frappe.msgprint(_("Unable to read file format for {0}").format(file_url), raise_exception=True)
content = None
try:
filename, extn = file_url.rsplit(".", 1)
except ValueError:
# no extn
with open(file_path, "rb") as f:
content = f.read()
filename = file_url
extn = None
extn = get_extension(filename, extn, content)
return image, filename, extn
def get_web_image(file_url):
# download
file_url = frappe.utils.get_url(file_url)
r = requests.get(file_url, stream=True)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if "404" in e.args[0]:
frappe.msgprint(_("File '{0}' not found").format(file_url))
else:
frappe.msgprint(_("Unable to read file format for {0}").format(file_url))
raise
image = Image.open(StringIO(frappe.safe_decode(r.content)))
try:
filename, extn = file_url.rsplit("/", 1)[1].rsplit(".", 1)
except ValueError:
# the case when the file url doesn't have filename or extension
# but is fetched due to a query string. example: https://encrypted-tbn3.gstatic.com/images?q=something
filename = get_random_filename()
extn = None
extn = get_extension(filename, extn, r.content)
filename = "/files/" + strip(unquote(filename))
return image, filename, extn
def delete_file(path):
"""Delete file from `public folder`"""
if path:
if ".." in path.split("/"):
frappe.msgprint(_("It is risky to delete this file: {0}. Please contact your System Manager.").format(path))
parts = os.path.split(path.strip("/"))
if parts[0]=="files":
path = frappe.utils.get_site_path("public", "files", parts[-1])
else:
path = frappe.utils.get_site_path("private", "files", parts[-1])
path = encode(path)
if os.path.exists(path):
os.remove(path)
def remove_file(fid=None, attached_to_doctype=None, attached_to_name=None):
"""Remove file and File entry"""
file_name = None
if not (attached_to_doctype and attached_to_name):
attached = frappe.db.get_value("File", fid,
["attached_to_doctype", "attached_to_name", "file_name"])
if attached:
attached_to_doctype, attached_to_name, file_name = attached
ignore_permissions, comment = False, None
if attached_to_doctype and attached_to_name:
doc = frappe.get_doc(attached_to_doctype, attached_to_name)
ignore_permissions = doc.has_permission("write") or False
if frappe.flags.in_web_form:
ignore_permissions = True
if not file_name:
file_name = frappe.db.get_value("File", fid, "file_name")
comment = doc.add_comment("Attachment Removed", _("Removed {0}").format(file_name))
frappe.delete_doc("File", fid, ignore_permissions=ignore_permissions)
return comment
def get_max_file_size():
return cint(conf.get('max_file_size')) or 10485760
def remove_all(dt, dn):
"""remove all files in a transaction"""
try:
for fid in frappe.db.sql_list("""select name from `tabFile` where
attached_to_doctype=%s and attached_to_name=%s""", (dt, dn)):
remove_file(fid=fid, attached_to_doctype=dt, attached_to_name=dn)
except Exception as e:
if e.args[0] != 1054: raise  # temporary: ignore "unknown column" errors until patched
def has_permission(doc, ptype=None, user=None):
has_access = False
user = user or frappe.session.user
if ptype == 'create':
has_access = frappe.has_permission('File', 'create', user=user)
if doc.owner in [user, 'Guest'] or user == 'Administrator':
if not (doc.owner == "Guest" and doc.is_private):
has_access = True
if doc.attached_to_doctype and doc.attached_to_name:
attached_to_doctype = doc.attached_to_doctype
attached_to_name = doc.attached_to_name
try:
ref_doc = frappe.get_doc(attached_to_doctype, attached_to_name)
if ptype in ['write', 'create', 'delete']:
has_access = ref_doc.has_permission('write')
if ptype == 'delete' and not has_access:
frappe.throw(_("Cannot delete file as it belongs to {0} {1} for which you do not have permissions").format(
doc.attached_to_doctype, doc.attached_to_name),
frappe.PermissionError)
else:
has_access = ref_doc.has_permission('read')
except frappe.DoesNotExistError:
# if parent doc is not created before file is created
# we cannot check its permission so we will use file's permission
has_access = frappe.has_permission('File', ptype, user=user)
return has_access
def remove_file_by_url(file_url, doctype=None, name=None):
if doctype and name:
fid = frappe.db.get_value("File", {
"file_url": file_url,
"attached_to_doctype": doctype,
"attached_to_name": name})
else:
fid = frappe.db.get_value("File", {"file_url": file_url})
if fid:
return remove_file(fid=fid)
def get_content_hash(content):
if isinstance(content, text_type):
content = content.encode()
return hashlib.md5(content).hexdigest() #nosec
def get_file_name(fname, optional_suffix):
# convert to unicode
fname = cstr(fname)
f = fname.rsplit('.', 1)
if len(f) == 1:
partial, extn = f[0], ""
else:
partial, extn = f[0], "." + f[1]
return '{partial}{suffix}{extn}'.format(partial=partial, extn=extn, suffix=optional_suffix)
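# Hedged example (hypothetical values): get_file_name("report.pdf", "-a1b2c3") would
# return "report-a1b2c3.pdf", i.e. the suffix is inserted before the extension so that
# files sharing a name on disk remain distinguishable.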
@frappe.whitelist()
def download_file(file_url):
"""
Download file using token and REST API. Valid session or
token is required to download private files.
Method : GET
Endpoint : frappe.core.doctype.file.file.download_file
URL Params : file_url = /path/to/file relative to site path
"""
file_doc = frappe.get_doc("File", {"file_url": file_url})
file_doc.check_permission("read")
frappe.local.response.filename = os.path.basename(file_url)
frappe.local.response.filecontent = file_doc.get_content()
frappe.local.response.type = "download"
def extract_images_from_doc(doc, fieldname):
content = doc.get(fieldname)
content = extract_images_from_html(doc, content)
if frappe.flags.has_dataurl:
doc.set(fieldname, content)
def extract_images_from_html(doc, content):
frappe.flags.has_dataurl = False
def _save_file(match):
data = match.group(1)
data = data.split("data:")[1]
headers, content = data.split(",")
if "filename=" in headers:
filename = headers.split("filename=")[-1]
# decode filename
if not isinstance(filename, text_type):
filename = text_type(filename, 'utf-8')
else:
mtype = headers.split(";")[0]
filename = get_random_filename(content_type=mtype)
doctype = doc.parenttype if doc.parent else doc.doctype
name = doc.parent or doc.name
_file = frappe.get_doc({
"doctype": "File",
"file_name": filename,
"attached_to_doctype": doctype,
"attached_to_name": name,
"content": content,
"decode": True
})
_file.save(ignore_permissions=True)
file_url = _file.file_url
if not frappe.flags.has_dataurl:
frappe.flags.has_dataurl = True
return '<img src="{file_url}"'.format(file_url=file_url)
if content and isinstance(content, string_types):
content = re.sub('<img[^>]*src\s*=\s*["\'](?=data:)(.*?)["\']', _save_file, content)
return content
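# Rough illustration of the rewrite, assuming a document field containing an inline
# data URL (the base64 payload and resulting file name below are hypothetical):
#   before: <img src="data:image/png;base64,iVBORw0KGgo...">
#   after:  <img src="/files/abc1234.png">, with a new File document attached to `doc`.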
def get_random_filename(extn=None, content_type=None):
if extn:
if not extn.startswith("."):
extn = "." + extn
elif content_type:
extn = mimetypes.guess_extension(content_type)
return random_string(7) + (extn or "")
@frappe.whitelist()
def unzip_file(name):
'''Unzip the given file and make file records for each of the extracted files'''
file_obj = frappe.get_doc('File', name)
files = file_obj.unzip()
return len(files)
@frappe.whitelist()
def get_attached_images(doctype, names):
'''get list of image urls attached in form
returns {name: ['image.jpg', 'image.png']}'''
if isinstance(names, string_types):
names = json.loads(names)
img_urls = frappe.db.get_list('File', filters={
'attached_to_doctype': doctype,
'attached_to_name': ('in', names),
'is_folder': 0
}, fields=['file_url', 'attached_to_name as docname'])
out = frappe._dict()
for i in img_urls:
out[i.docname] = out.get(i.docname, [])
out[i.docname].append(i.file_url)
return out
@frappe.whitelist()
def validate_filename(filename):
from frappe.utils import now_datetime
timestamp = now_datetime().strftime(" %Y-%m-%d %H:%M:%S")
fname = get_file_name(filename, timestamp)
return fname
@frappe.whitelist()
def get_files_in_folder(folder):
return frappe.db.get_all('File',
{ 'folder': folder },
['name', 'file_name', 'file_url', 'is_folder', 'modified']
)
def update_existing_file_docs(doc):
# Update is private and file url of all file docs that point to the same file
frappe.db.sql("""
UPDATE `tabFile`
SET
file_url = %(file_url)s,
is_private = %(is_private)s
WHERE
content_hash = %(content_hash)s
and name != %(file_name)s
""", dict(
file_url=doc.file_url,
is_private=doc.is_private,
content_hash=doc.content_hash,
file_name=doc.name
))
def attach_files_to_document(doc, event):
""" Runs on on_update hook of all documents.
Goes through every Attach and Attach Image field and attaches
the file url to the document if it is not already attached.
"""
attach_fields = doc.meta.get(
"fields", {"fieldtype": ["in", ["Attach", "Attach Image"]]}
)
for df in attach_fields:
# this method runs in on_update hook of all documents
# we don't want the update to fail if the file cannot be attached for some reason
try:
value = doc.get(df.fieldname)
if not value or not value.startswith(("/files", "/private/files")):
return
if frappe.db.exists("File", {
"file_url": value,
"attached_to_name": doc.name,
"attached_to_doctype": doc.doctype,
"attached_to_field": df.fieldname,
}):
return
frappe.get_doc(
doctype="File",
file_url=value,
attached_to_name=doc.name,
attached_to_doctype=doc.doctype,
attached_to_field=df.fieldname,
folder=frappe.db.get_value("File", {"is_attachments_folder": 1}),
).insert()
except Exception:
frappe.log_error(title=_("Error Attaching File"))
|
"Provides basic training and validation with `Learner`"
from .torch_core import *
from .basic_data import *
from .callback import *
from .data_block import *
from .utils.ipython import gpu_mem_restore
import inspect
from fastprogress.fastprogress import format_time
from time import time
__all__ = ['Learner', 'LearnerCallback', 'Recorder', 'RecordOnCPU', 'fit', 'loss_batch', 'train_epoch', 'validate',
'get_preds', 'load_learner']
defaults.lr = slice(3e-3)
defaults.wd = 1e-2
defaults.extra_callbacks = None
def loss_batch(model:nn.Module, xb:Tensor, yb:Tensor, loss_func:OptLossFunc=None, opt:OptOptimizer=None,
cb_handler:Optional[CallbackHandler]=None)->Tuple[Union[Tensor,int,float,str]]:
"Calculate loss and metrics for a batch, call out to callbacks as necessary."
cb_handler = ifnone(cb_handler, CallbackHandler())
if not is_listy(xb): xb = [xb]
if not is_listy(yb): yb = [yb]
out = model(*xb)
out = cb_handler.on_loss_begin(out)
if not loss_func: return to_detach(out), yb[0].detach()
loss = loss_func(out, *yb)
if opt is not None:
loss,skip_bwd = cb_handler.on_backward_begin(loss)
if not skip_bwd: loss.backward()
if not cb_handler.on_backward_end(): opt.step()
if not cb_handler.on_step_end(): opt.zero_grad()
return loss.detach().cpu()
def get_preds(model:nn.Module, dl:DataLoader, pbar:Optional[PBar]=None, cb_handler:Optional[CallbackHandler]=None,
activ:nn.Module=None, loss_func:OptLossFunc=None, n_batch:Optional[int]=None) -> List[Tensor]:
"Tuple of predictions and targets, and optional losses (if `loss_func`) using `dl`, max batches `n_batch`."
res = [torch.cat(o).cpu() for o in
zip(*validate(model, dl, cb_handler=cb_handler, pbar=pbar, average=False, n_batch=n_batch))]
if loss_func is not None:
with NoneReduceOnCPU(loss_func) as lf: res.append(lf(res[0], res[1]))
if activ is not None: res[0] = activ(res[0])
return res
def validate(model:nn.Module, dl:DataLoader, loss_func:OptLossFunc=None, cb_handler:Optional[CallbackHandler]=None,
pbar:Optional[PBar]=None, average=True, n_batch:Optional[int]=None)->Iterator[Tuple[Union[Tensor,int],...]]:
"Calculate `loss_func` of `model` on `dl` in evaluation mode."
model.eval()
with torch.no_grad():
val_losses,nums = [],[]
if cb_handler: cb_handler.set_dl(dl)
for xb,yb in progress_bar(dl, parent=pbar, leave=(pbar is not None)):
if cb_handler: xb, yb = cb_handler.on_batch_begin(xb, yb, train=False)
val_loss = loss_batch(model, xb, yb, loss_func, cb_handler=cb_handler)
val_losses.append(val_loss)
if not is_listy(yb): yb = [yb]
nums.append(yb[0].shape[0])
if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
if n_batch and (len(nums)>=n_batch): break
nums = np.array(nums, dtype=np.float32)
if average: return (to_np(torch.stack(val_losses)) * nums).sum() / nums.sum()
else: return val_losses
def train_epoch(model:nn.Module, dl:DataLoader, opt:optim.Optimizer, loss_func:LossFunction)->None:
"Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`."
model.train()
for xb,yb in dl:
loss = loss_func(model(xb), yb)
loss.backward()
opt.step()
opt.zero_grad()
def fit(epochs:int, model:nn.Module, loss_func:LossFunction, opt:optim.Optimizer,
data:DataBunch, callbacks:Optional[CallbackList]=None, metrics:OptMetrics=None)->None:
"Fit the `model` on `data` and learn using `loss_func` and `opt`."
assert len(data.train_dl) != 0, f"""Your training dataloader is empty, can't train a model.
Use a smaller batch size (batch size={data.train_dl.batch_size} for {len(data.train_dl.dataset)} elements)."""
cb_handler = CallbackHandler(callbacks, metrics)
pbar = master_bar(range(epochs))
cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics)
exception=False
try:
for epoch in pbar:
model.train()
cb_handler.set_dl(data.train_dl)
cb_handler.on_epoch_begin()
for xb,yb in progress_bar(data.train_dl, parent=pbar):
xb, yb = cb_handler.on_batch_begin(xb, yb)
loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
if cb_handler.on_batch_end(loss): break
if not data.empty_val:
val_loss = validate(model, data.valid_dl, loss_func=loss_func,
cb_handler=cb_handler, pbar=pbar)
else: val_loss=None
if cb_handler.on_epoch_end(val_loss): break
except Exception as e:
exception = e
raise
finally: cb_handler.on_train_end(exception)
loss_func_name2activ = {'cross_entropy_loss': F.softmax, 'nll_loss': torch.exp, 'poisson_nll_loss': torch.exp,
'kl_div_loss': torch.exp, 'bce_with_logits_loss': torch.sigmoid, 'cross_entropy': F.softmax,
'kl_div': torch.exp, 'binary_cross_entropy_with_logits': torch.sigmoid,
}
def _loss_func_name2activ(name:str, axis:int=-1):
res = loss_func_name2activ[name]
if res == F.softmax: res = partial(F.softmax, dim=axis)
return res
def _loss_func2activ(loss_func):
if getattr(loss_func,'keywords',None):
if not loss_func.keywords.get('log_input', True): return
axis = getattr(loss_func, 'axis', -1)
# flattened loss
loss_func = getattr(loss_func, 'func', loss_func)
# could have a partial inside flattened loss! Duplicate on purpose.
loss_func = getattr(loss_func, 'func', loss_func)
cls_name = camel2snake(loss_func.__class__.__name__)
if cls_name == 'mix_up_loss':
loss_func = loss_func.crit
cls_name = camel2snake(loss_func.__class__.__name__)
if cls_name in loss_func_name2activ:
if cls_name == 'poisson_nll_loss' and (not getattr(loss_func, 'log_input', True)): return
return _loss_func_name2activ(cls_name, axis)
if getattr(loss_func,'__name__','') in loss_func_name2activ:
return _loss_func_name2activ(loss_func.__name__, axis)
return noop
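# Hedged sketch of the mapping, assuming standard PyTorch losses: an
# nn.CrossEntropyLoss instance snake-cases to "cross_entropy_loss" and therefore maps
# to F.softmax over the last axis, while an unrecognised loss falls through to `noop`
# and the raw model outputs are returned unchanged.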
@dataclass
class Learner():
"Trainer for `model` using `data` to minimize `loss_func` with optimizer `opt_func`."
data:DataBunch
model:nn.Module
opt_func:Callable=AdamW
loss_func:Callable=None
metrics:Collection[Callable]=None
true_wd:bool=True
bn_wd:bool=True
wd:Floats=defaults.wd
train_bn:bool=True
path:str = None
model_dir:PathOrStr = 'models'
callback_fns:Collection[Callable]=None
callbacks:Collection[Callback]=field(default_factory=list)
layer_groups:Collection[nn.Module]=None
add_time:bool=True
def __post_init__(self)->None:
"Setup path,metrics, callbacks and ensure model directory exists."
self.path = Path(ifnone(self.path, self.data.path))
(self.path/self.model_dir).mkdir(parents=True, exist_ok=True)
self.model = self.model.to(self.data.device)
self.loss_func = self.loss_func or self.data.loss_func
self.metrics=listify(self.metrics)
if not self.layer_groups: self.layer_groups = [nn.Sequential(*flatten_model(self.model))]
self.callbacks = listify(self.callbacks)
self.callback_fns = [partial(Recorder, add_time=self.add_time)] + listify(self.callback_fns)
def init(self, init): apply_init(self.model, init)
def _test_writeable_path(self):
path = self.path/self.model_dir
try: tmp_file = get_tmp_file(path)
except OSError as e:
raise Exception(f"{e}\nCan't write to '{path}', set `learn.model_dir` attribute in Learner to a full path that is writable") from None
os.remove(tmp_file)
def lr_range(self, lr:Union[float,slice])->np.ndarray:
"Build differential learning rates from `lr`."
if not isinstance(lr,slice): return lr
if lr.start: res = even_mults(lr.start, lr.stop, len(self.layer_groups))
else: res = [lr.stop/10]*(len(self.layer_groups)-1) + [lr.stop]
return np.array(res)
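# Hedged example, assuming three layer groups: lr_range(slice(1e-5, 1e-3)) spreads the
# rates geometrically via even_mults, e.g. array([1e-5, 1e-4, 1e-3]), while
# lr_range(slice(1e-3)) (no start) gives array([1e-4, 1e-4, 1e-3]), i.e. stop/10 for
# every group except the last.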
def fit(self, epochs:int, lr:Union[Floats,slice]=defaults.lr,
wd:Floats=None, callbacks:Collection[Callback]=None)->None:
"Fit the model on this learner with `lr` learning rate, `wd` weight decay for `epochs` with `callbacks`."
lr = self.lr_range(lr)
if wd is None: wd = self.wd
if not getattr(self, 'opt', False): self.create_opt(lr, wd)
else: self.opt.lr,self.opt.wd = lr,wd
callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
callbacks=self.callbacks+callbacks)
def create_opt(self, lr:Floats, wd:Floats=0.)->None:
"Create optimizer with `lr` learning rate and `wd` weight decay."
self.opt = OptimWrapper.create(self.opt_func, lr, self.layer_groups, wd=wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
def split(self, split_on:SplitFuncOrIdxList)->None:
"Split the model at `split_on`."
if isinstance(split_on,Callable): split_on = split_on(self.model)
self.layer_groups = split_model(self.model, split_on)
return self
def freeze_to(self, n:int)->None:
"Freeze layers up to layer group `n`."
for g in self.layer_groups[:n]:
for l in g:
if not self.train_bn or not isinstance(l, bn_types): requires_grad(l, False)
for g in self.layer_groups[n:]: requires_grad(g, True)
self.create_opt(defaults.lr)
def freeze(self)->None:
"Freeze up to last layer group."
assert(len(self.layer_groups)>1)
self.freeze_to(-1)
self.create_opt(defaults.lr)
def unfreeze(self):
"Unfreeze entire model."
self.freeze_to(0)
self.create_opt(defaults.lr)
def export(self, fname:PathOrStr='export.pkl', destroy=False):
"Export the state of the `Learner` in `self.path/fname`."
if rank_distrib(): return # don't save if slave proc
args = ['opt_func', 'loss_func', 'metrics', 'true_wd', 'bn_wd', 'wd', 'train_bn', 'model_dir', 'callback_fns']
state = {a:getattr(self,a) for a in args}
state['cb_state'] = {cb.__class__:cb.get_state() for cb in self.callbacks}
#layer_groups -> need to find a way
#TO SEE: do we save model structure and weights separately?
with ModelOnCPU(self.model) as m:
state['model'] = m
xtra = dict(normalize=self.data.norm.keywords) if getattr(self.data, 'norm', False) else {}
state['data'] = self.data.valid_ds.get_state(**xtra)
state['cls'] = self.__class__
try_save(state, self.path, fname)
if destroy: self.destroy()
def save(self, name:PathOrStr, return_path:bool=False, with_opt:bool=True):
"Save model and optimizer state (if `with_opt`) with `name` to `self.model_dir`."
self._test_writeable_path()
if rank_distrib(): return # don't save if slave proc
path = self.path/self.model_dir/f'{name}.pth'
if not hasattr(self, 'opt'): with_opt=False
if not with_opt: state = get_model(self.model).state_dict()
else: state = {'model': get_model(self.model).state_dict(), 'opt':self.opt.state_dict()}
torch.save(state, path)
if return_path: return path
def dl(self, ds_type:DatasetType=DatasetType.Valid):
"Return DataLoader for DatasetType `ds_type`."
return self.data.dl(ds_type)
def load(self, name:PathOrStr, device:torch.device=None, strict:bool=True, with_opt:bool=None, purge:bool=True,
remove_module:bool=False):
"Load model and optimizer state (if `with_opt`) `name` from `self.model_dir` using `device`."
if purge: self.purge(clear_opt=ifnone(with_opt, False))
if device is None: device = self.data.device
elif isinstance(device, int): device = torch.device('cuda', device)
state = torch.load(self.path/self.model_dir/f'{name}.pth', map_location=device)
if set(state.keys()) == {'model', 'opt'}:
model_state = state['model']
if remove_module: model_state = remove_module_load(model_state)
get_model(self.model).load_state_dict(model_state, strict=strict)
if ifnone(with_opt,True):
if not hasattr(self, 'opt'): self.create_opt(defaults.lr, self.wd)
try: self.opt.load_state_dict(state['opt'])
except: pass
else:
if with_opt: warn("Saved file doesn't contain an optimizer state.")
if remove_module: state = remove_module_load(state)
get_model(self.model).load_state_dict(state, strict=strict)
del state
gc.collect()
return self
def destroy(self):
"Free the Learner internals, leaving just an empty shell that consumes no memory"
class ZombieLearner(Learner):
msg = "this object has been destroyed"
def __getattr__(self, item): print(ZombieLearner.msg); return None
def destroyed(*args, **kwargs): print(ZombieLearner.msg)
attrs = [k for k in self.__dict__.keys() if not k.startswith("__")]
for a in attrs: delattr(self, a)
# the instance methods can still be called, but will just give a message
methods = [k for k in dir(self) if not k.startswith("__") and inspect.isroutine(getattr(self, k))]
for m in methods: setattr(self, m, ZombieLearner.destroyed)
self.__class__ = ZombieLearner
gc.collect()
print("this Learner object self-destroyed - it still exists, but no longer usable")
def purge(self, clear_opt:bool=True):
"Purge the `Learner` of all cached attributes to release some GPU memory."
self._test_writeable_path()
attrs_all = [k for k in self.__dict__.keys() if not k.startswith("__")]
attrs_pkl = ['bn_wd', 'callback_fns', 'layer_groups', 'loss_func', 'metrics', 'model',
'model_dir', 'opt_func', 'path', 'train_bn', 'true_wd', 'wd']
# +callbacks: get pickled too, but not directly
attrs_keep = ['data', 'recorder']
attrs_del = list(set(attrs_all) - set(attrs_keep))
state = {a:getattr(self, a) for a in attrs_pkl}
state['cb_state'] = {cb.__class__:cb.get_state() for cb in self.callbacks}
if hasattr(self, 'opt'): state['opt'] = self.opt.get_state()
tmp_file = get_tmp_file(self.path/self.model_dir)
torch.save(state, open(tmp_file, 'wb'))
for a in attrs_del: delattr(self, a)
gc.collect()
state = torch.load(tmp_file)
os.remove(tmp_file)
for a in attrs_pkl: setattr(self, a, state[a])
cb_state = state.pop('cb_state')
self.callbacks = [load_callback(c,s, self) for c,s in cb_state.items()]
if not clear_opt and 'opt' in state:
self.opt = OptimWrapper.load_with_state_and_layer_group(state['opt'], self.layer_groups)
del state
gc.collect()
return self
def get_preds(self, ds_type:DatasetType=DatasetType.Valid, with_loss:bool=False, n_batch:Optional[int]=None,
pbar:Optional[PBar]=None) -> List[Tensor]:
"Return predictions and targets on `ds_type` dataset."
lf = self.loss_func if with_loss else None
return get_preds(self.model, self.dl(ds_type), cb_handler=CallbackHandler(self.callbacks),
activ=_loss_func2activ(self.loss_func), loss_func=lf, n_batch=n_batch, pbar=pbar)
def pred_batch(self, ds_type:DatasetType=DatasetType.Valid, batch:Tuple=None, reconstruct:bool=False) -> List[Tensor]:
"Return output of the model on one batch from `ds_type` dataset."
if batch is not None: xb,yb = batch
else: xb,yb = self.data.one_batch(ds_type, detach=False, denorm=False)
cb_handler = CallbackHandler(self.callbacks)
xb,yb = cb_handler.on_batch_begin(xb,yb, train=False)
preds = loss_batch(self.model.eval(), xb, yb, cb_handler=cb_handler)
res = _loss_func2activ(self.loss_func)(preds[0])
if not reconstruct: return res
res = res.detach().cpu()
ds = self.dl(ds_type).dataset
norm = getattr(self.data, 'norm', False)
if norm and norm.keywords.get('do_y',False):
res = self.data.denorm(res, do_x=True)
return [ds.reconstruct(o) for o in res]
def backward(self, item):
"Pass `item` through the model and computes the gradient. Useful if `backward_hooks` are attached."
xb,yb = self.data.one_item(item)
loss = loss_batch(self.model.eval(), xb, yb, self.loss_func, opt=FakeOptimizer(),
cb_handler=CallbackHandler(self.callbacks))
return loss
def predict(self, item:ItemBase, **kwargs):
"Return predicted class, label and probabilities for `item`."
batch = self.data.one_item(item)
res = self.pred_batch(batch=batch)
pred,x = res[0],batch[0]
norm = getattr(self.data,'norm',False)
if norm:
x = self.data.denorm(x)
if norm.keywords.get('do_y',False): pred = self.data.denorm(pred)
ds = self.data.single_ds
pred = ds.y.analyze_pred(pred, **kwargs)
out = ds.y.reconstruct(pred, ds.x.reconstruct(x[0])) if has_arg(ds.y.reconstruct, 'x') else ds.y.reconstruct(pred)
return out, pred, res[0]
def validate(self, dl=None, callbacks=None, metrics=None):
"Validate on `dl` with potential `callbacks` and `metrics`."
dl = ifnone(dl, self.data.valid_dl)
metrics = ifnone(metrics, self.metrics)
cb_handler = CallbackHandler(self.callbacks + ifnone(callbacks, []), metrics)
cb_handler.on_epoch_begin()
val_metrics = validate(self.model, dl, self.loss_func, cb_handler)
cb_handler.on_epoch_end(val_metrics)
return cb_handler.state_dict['last_metrics']
def show_results(self, ds_type=DatasetType.Valid, rows:int=5, **kwargs):
"Show `rows` result of predictions on `ds_type` dataset."
#TODO: get rid of has_arg x and split_kwargs_by_func if possible
#TODO: simplify this and refactor with pred_batch(...reconstruct=True)
n_items = rows ** 2 if self.data.train_ds.x._square_show_res else rows
if self.dl(ds_type).batch_size < n_items: n_items = self.dl(ds_type).batch_size
ds = self.dl(ds_type).dataset
self.callbacks.append(RecordOnCPU())
preds = self.pred_batch(ds_type)
*self.callbacks,rec_cpu = self.callbacks
x,y = rec_cpu.input,rec_cpu.target
norm = getattr(self.data,'norm',False)
if norm:
x = self.data.denorm(x)
if norm.keywords.get('do_y',False):
y = self.data.denorm(y, do_x=True)
preds = self.data.denorm(preds, do_x=True)
analyze_kwargs,kwargs = split_kwargs_by_func(kwargs, ds.y.analyze_pred)
preds = [ds.y.analyze_pred(grab_idx(preds, i), **analyze_kwargs) for i in range(n_items)]
xs = [ds.x.reconstruct(grab_idx(x, i)) for i in range(n_items)]
if has_arg(ds.y.reconstruct, 'x'):
ys = [ds.y.reconstruct(grab_idx(y, i), x=x) for i,x in enumerate(xs)]
zs = [ds.y.reconstruct(z, x=x) for z,x in zip(preds,xs)]
else :
ys = [ds.y.reconstruct(grab_idx(y, i)) for i in range(n_items)]
zs = [ds.y.reconstruct(z) for z in preds]
ds.x.show_xyzs(xs, ys, zs, **kwargs)
class RecordOnCPU(Callback):
"Store the `input` and `target` going through the model on the CPU."
def on_batch_begin(self, last_input,last_target,**kwargs):
self.input,self.target = to_cpu(last_input),to_cpu(last_target)
class LearnerCallback(Callback):
"Base class for creating callbacks for a `Learner`."
def __init__(self, learn):
self._learn = weakref.ref(learn)
self.exclude,self.not_min = ['_learn'],[]
setattr(self.learn, self.cb_name, self)
def __getattr__(self,k): return getattr(self.learn, k)
def __setstate__(self,data:Any): self.__dict__.update(data)
@property
def learn(self) -> Learner: return self._learn()
@learn.setter
def learn(self, learn: Learner) -> None: self._learn = weakref.ref(learn)
@property
def cb_name(self): return camel2snake(self.__class__.__name__)
class Recorder(LearnerCallback):
"A `LearnerCallback` that records epoch, loss, opt and metric data during training."
_order=-10
def __init__(self, learn:Learner, add_time:bool=True):
super().__init__(learn)
self.opt = self.learn.opt
self.train_dl = self.learn.data.train_dl
self.no_val,self.silent,self.add_time = False,False,add_time
def on_train_begin(self, pbar:PBar, metrics_names:Collection[str], **kwargs:Any)->None:
"Initialize recording status at beginning of training."
self.pbar = pbar
self.names = ['epoch', 'train_loss'] if self.no_val else ['epoch', 'train_loss', 'valid_loss']
self.names += metrics_names
if hasattr(self, '_added_met_names'): self.names += self._added_met_names
if self.add_time: self.names.append('time')
if not self.silent: self.pbar.write(self.names, table=True)
self.losses,self.val_losses,self.lrs,self.moms,self.metrics,self.nb_batches = [],[],[],[],[],[]
def on_epoch_begin(self, **kwargs:Any)->None:
if self.add_time: self.start_epoch = time()
def on_batch_begin(self, train, **kwargs:Any)->None:
"Record learning rate and momentum at beginning of batch."
if train:
self.lrs.append(self.opt.lr)
self.moms.append(self.opt.mom)
def on_backward_begin(self, smooth_loss:Tensor, **kwargs:Any)->None:
"Record the loss before any other callback has a chance to modify it."
self.losses.append(smooth_loss)
if self.pbar is not None and hasattr(self.pbar,'child'):
self.pbar.child.comment = f'{smooth_loss:.4f}'
def on_epoch_end(self, epoch:int, num_batch:int, smooth_loss:Tensor,
last_metrics=MetricsList, **kwargs:Any)->bool:
"Save epoch info: num_batch, smooth_loss, metrics."
self.nb_batches.append(num_batch)
if last_metrics is not None: self.val_losses.append(last_metrics[0])
else: last_metrics = [] if self.no_val else [None]
if len(last_metrics) > 1: self.metrics.append(last_metrics[1:])
self.format_stats([epoch, smooth_loss] + last_metrics)
def format_stats(self, stats:TensorOrNumList)->None:
"Format stats before printing."
str_stats = []
for name,stat in zip(self.names,stats):
str_stats.append('' if stat is None else str(stat) if isinstance(stat, int) else f'{stat:.6f}')
if self.add_time: str_stats.append(format_time(time() - self.start_epoch))
if not self.silent: self.pbar.write(str_stats, table=True)
def add_metric_names(self, names):
"Add `names` to the inner metric names."
self._added_met_names = names
def plot_lr(self, show_moms=False, skip_start:int=0, skip_end:int=0, return_fig:bool=None)->Optional[plt.Figure]:
"Plot learning rate, `show_moms` to include momentum."
iterations = range_of(self.lrs)
lrs = self.lrs[skip_start:-skip_end] if skip_end > 0 else self.lrs[skip_start:]
iterations = iterations[skip_start:-skip_end] if skip_end > 0 else iterations[skip_start:]
if show_moms:
moms = self.moms[skip_start:-skip_end] if skip_end > 0 else self.moms[skip_start:]
fig, axs = plt.subplots(1,2, figsize=(12,4))
axs[0].plot(iterations, lrs)
axs[0].set_xlabel('Iterations')
axs[0].set_ylabel('Learning Rate')
axs[1].plot(iterations, moms)
axs[1].set_xlabel('Iterations')
axs[1].set_ylabel('Momentum')
else:
fig, ax = plt.subplots()
ax.plot(iterations, lrs)
if ifnone(return_fig, defaults.return_fig): return fig
@staticmethod
def smoothen_by_spline(xs, ys, **kwargs):
xs = np.arange(len(ys))
spl = scipy.interpolate.UnivariateSpline(xs, ys, **kwargs)
ys = spl(xs)
return ys
def plot(self, skip_start:int=10, skip_end:int=5, suggestion:bool=False, return_fig:bool=None,
**kwargs)->Optional[plt.Figure]:
"Plot learning rate and losses, trimmed between `skip_start` and `skip_end`. Optionally plot and return min gradient"
lrs = self.lrs[skip_start:-skip_end] if skip_end > 0 else self.lrs[skip_start:]
losses = self.losses[skip_start:-skip_end] if skip_end > 0 else self.losses[skip_start:]
losses = [x.item() for x in losses]
if 'k' in kwargs: losses = self.smoothen_by_spline(lrs, losses, **kwargs)
fig, ax = plt.subplots(1,1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
if suggestion:
try: mg = (np.gradient(np.array(losses))).argmin()
            except Exception:
                print("Failed to compute the gradients; there might not be enough points.")
return
print(f"Min numerical gradient: {lrs[mg]:.2E}")
ax.plot(lrs[mg],losses[mg],markersize=10,marker='o',color='red')
self.min_grad_lr = lrs[mg]
if ifnone(return_fig, defaults.return_fig): return fig
def plot_losses(self, skip_start:int=0, skip_end:int=0, return_fig:bool=None)->Optional[plt.Figure]:
"Plot training and validation losses."
fig, ax = plt.subplots(1,1)
iterations = range_of(self.losses)
losses = self.losses[skip_start:-skip_end] if skip_end > 0 else self.losses[skip_start:]
iterations = iterations[skip_start:-skip_end] if skip_end > 0 else iterations[skip_start:]
ax.plot(iterations, losses, label='Train')
val_iter = np.cumsum(self.nb_batches)
start_val = (val_iter - skip_start >= 0).nonzero()[0].min()
end_val = (val_iter[-1] - val_iter - skip_end >= 0).nonzero()[0].max()+1
val_iter = val_iter[start_val:end_val] if skip_end > 0 else val_iter[start_val:]
val_losses = self.val_losses[start_val:end_val] if skip_end > 0 else self.val_losses[start_val:]
ax.plot(val_iter, val_losses, label='Validation')
ax.set_ylabel('Loss')
ax.set_xlabel('Batches processed')
ax.legend()
if ifnone(return_fig, defaults.return_fig): return fig
def plot_metrics(self, return_fig:bool=None)->Optional[plt.Figure]:
"Plot metrics collected during training."
assert len(self.metrics) != 0, "There are no metrics to plot."
fig, axes = plt.subplots(len(self.metrics[0]),1,figsize=(6, 4*len(self.metrics[0])))
val_iter = self.nb_batches
val_iter = np.cumsum(val_iter)
axes = axes.flatten() if len(self.metrics[0]) != 1 else [axes]
for i, ax in enumerate(axes):
values = [met[i] for met in self.metrics]
ax.plot(val_iter, values)
if ifnone(return_fig, defaults.return_fig): return fig
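# Usage sketch (illustrative only, not part of the library): `Recorder` is attached
# automatically to every `Learner`, so its plots are usually reached through
# `learn.recorder`. Assuming a `learn` object built elsewhere (e.g. with `cnn_learner`):
#
#   learn.lr_find()                         # populates recorder.lrs / recorder.losses
#   learn.recorder.plot(suggestion=True)    # loss vs. LR, marks the min-gradient LR
#   learn.fit_one_cycle(3)
#   learn.recorder.plot_losses()            # train/valid loss curves
#   learn.recorder.plot_lr(show_moms=True)  # LR and momentum schedules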
class FakeOptimizer():
def step(self): pass
def zero_grad(self): pass
def load_callback(class_func, state, learn:Learner):
init_kwargs, others = split_kwargs_by_func(state, class_func.__init__)
res = class_func(learn, **init_kwargs) if issubclass(class_func, LearnerCallback) else class_func(**init_kwargs)
for k,v in others.items(): setattr(res, k, v)
return res
def load_learner(path:PathOrStr, fname:PathOrStr='export.pkl', test:ItemList=None, **db_kwargs):
"Load a `Learner` object saved with `export_state` in `path/fn` with empty data, optionally add `test` and load on `cpu`."
state = torch.load(Path(path)/fname, map_location='cpu') if defaults.device == torch.device('cpu') else torch.load(Path(path)/fname)
model = state.pop('model')
src = LabelLists.load_state(path, state.pop('data'))
if test is not None: src.add_test(test)
data = src.databunch(**db_kwargs)
cb_state = state.pop('cb_state')
clas_func = state.pop('cls')
res = clas_func(data, model, **state)
res.callback_fns = state['callback_fns'] #to avoid duplicates
res.callbacks = [load_callback(c,s, res) for c,s in cb_state.items()]
return res
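# Usage sketch (illustrative): a `Learner` is typically serialized with
# `learn.export()` (which writes `export.pkl` next to `learn.path`) and later
# reloaded for inference with `load_learner`. `item` below is assumed to be a
# single element compatible with the original `ItemList`.
#
#   learn.export()
#   learn = load_learner(learn.path)
#   pred_class, pred_idx, probs = learn.predict(item)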
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logger', '0005_requestlogger_modified'),
]
operations = [
migrations.CreateModel(
name='HTTPRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField(auto_now_add=True, verbose_name='Datetime')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
('ip', models.GenericIPAddressField(verbose_name='IP')),
('method', models.CharField(choices=[('GET', 'GET'), ('POST', 'POST'), ('PATCH', 'PATCH'), ('PUT', 'PUT'), ('HEAD', 'HEAD')], default='GET', max_length=10, verbose_name='Method')),
('api_version', models.PositiveSmallIntegerField(verbose_name='API version')),
('data', models.TextField(blank=True, null=True, verbose_name='Data')),
('sha1', models.CharField(blank=True, default=None, max_length=40, null=True, unique=True, verbose_name='SHA1')),
],
options={
'verbose_name': 'HTTP Request',
'verbose_name_plural': 'HTTP Requests',
},
),
]
|
"""This module provides an utility function to retrieve the global hook_manager singleton
in a Kedro's execution process.
"""
# pylint: disable=global-statement,invalid-name
import logging
from typing import Any, Iterable
from pluggy import PluginManager
from .markers import HOOK_NAMESPACE
from .specs import (
DataCatalogSpecs,
DatasetSpecs,
NodeSpecs,
PipelineSpecs,
RegistrationSpecs,
)
_hook_manager = None
_PLUGIN_HOOKS = "kedro.hooks" # entry-point to load hooks from for installed plugins
logger = logging.getLogger(__name__)
def _create_hook_manager() -> PluginManager:
"""Create a new PluginManager instance and register Kedro's hook specs."""
manager = PluginManager(HOOK_NAMESPACE)
manager.add_hookspecs(NodeSpecs)
manager.add_hookspecs(PipelineSpecs)
manager.add_hookspecs(DataCatalogSpecs)
manager.add_hookspecs(RegistrationSpecs)
manager.add_hookspecs(DatasetSpecs)
return manager
def get_hook_manager():
"""Create or return the global _hook_manager singleton instance."""
global _hook_manager
if _hook_manager is None:
_hook_manager = _create_hook_manager()
return _hook_manager
def _register_hooks(hook_manager: PluginManager, hooks: Iterable[Any]) -> None:
"""Register all hooks as specified in ``hooks`` with the global ``hook_manager``.
Args:
hook_manager: Hook manager instance to register the hooks with.
hooks: Hooks that need to be registered.
"""
for hooks_collection in hooks:
        # Sometimes users might call hook registration more than once, in which
        # case hooks have already been registered, so we perform a simple check
        # here to avoid an error being raised and breaking the user's workflow.
if not hook_manager.is_registered(hooks_collection):
hook_manager.register(hooks_collection)
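# Usage sketch (illustrative): a project would normally obtain the singleton and
# register its own hook implementations with it. `ProjectHooks` is a hypothetical
# class implementing one or more of the specs added above.
#
#   hook_manager = get_hook_manager()
#   _register_hooks(hook_manager, hooks=[ProjectHooks()])
#   _register_hooks_setuptools(hook_manager, disabled_plugins=[])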
def _register_hooks_setuptools(
hook_manager: PluginManager, disabled_plugins: Iterable[str]
) -> None:
"""Register pluggy hooks from setuptools entrypoints.
Args:
hook_manager: Hook manager instance to register the hooks with.
        disabled_plugins: An iterable returning the names of plugins
            whose hooks must not be registered; any already registered
            hooks will be unregistered.
"""
already_registered = hook_manager.get_plugins()
found = hook_manager.load_setuptools_entrypoints(_PLUGIN_HOOKS)
disabled_plugins = set(disabled_plugins)
# Get list of plugin/distinfo tuples for all setuptools registered plugins.
plugininfo = hook_manager.list_plugin_distinfo()
plugin_names = set()
disabled_plugin_names = set()
for plugin, dist in plugininfo:
if dist.project_name in disabled_plugins:
# `unregister()` is used instead of `set_blocked()` because
# we want to disable hooks for specific plugin based on project
# name and not `entry_point` name. Also, we log project names with
# version for which hooks were registered.
hook_manager.unregister(plugin=plugin)
found -= 1
disabled_plugin_names.add(f"{dist.project_name}-{dist.version}")
elif plugin not in already_registered:
plugin_names.add(f"{dist.project_name}-{dist.version}")
if disabled_plugin_names:
logger.info(
"Hooks are disabled for plugin(s): %s",
", ".join(sorted(disabled_plugin_names)),
)
if plugin_names:
logger.info(
"Registered hooks from %d installed plugin(s): %s",
found,
", ".join(sorted(plugin_names)),
)
|
# Greedy step counter: M is the reach gained from each chosen position, P is the
# target position, N is the number of intermediate positions. P itself is appended
# to the position list so the loop can terminate at the target.
M, P, N = [int(x) for x in input().split()]
s = [int(x) for x in input().split()] + [P]
now = 0
pos = 0
step = 0
# Target already within the initial reach: no steps needed.
if M >= P:
    print('0')
    exit()
# Walk the list until a position beyond M is found, keeping the last one within M.
for x in s:
    if x <= M:
        pos = x
    else:
        break
while pos < P:
    if s[now] < pos:
        # Skip positions that lie before the current position.
        now += 1
    else:
        # Take one step from the next position and extend the reach by M.
        step += 1
        pos = s[now] + M
        now += 1
        # If the following position is beyond the new reach, the target is unreachable.
        if pos < s[now]:
            print('IMPOSSIBLE')
            exit()
print(step)
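# Worked example (tracing the code above): with input
#   3 10 3
#   2 5 8
# the reach per step is M=3 and the target is P=10. The loop extends the
# position 2 -> 5 -> 8 -> 11, counting one step each time, so the program prints 3.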
|
from jobtech.common import *
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/services/ad_group_simulation_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.resources import ad_group_simulation_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_ad__group__simulation__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/services/ad_group_simulation_service.proto',
package='google.ads.googleads.v6.services',
syntax='proto3',
serialized_options=b'\n$com.google.ads.googleads.v6.servicesB\035AdGroupSimulationServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V6.Services\312\002 Google\\Ads\\GoogleAds\\V6\\Services\352\002$Google::Ads::GoogleAds::V6::Services',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nBgoogle/ads/googleads/v6/services/ad_group_simulation_service.proto\x12 google.ads.googleads.v6.services\x1a;google/ads/googleads/v6/resources/ad_group_simulation.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"h\n\x1bGetAdGroupSimulationRequest\x12I\n\rresource_name\x18\x01 \x01(\tB2\xe0\x41\x02\xfa\x41,\n*googleads.googleapis.com/AdGroupSimulation2\xbd\x02\n\x18\x41\x64GroupSimulationService\x12\xd9\x01\n\x14GetAdGroupSimulation\x12=.google.ads.googleads.v6.services.GetAdGroupSimulationRequest\x1a\x34.google.ads.googleads.v6.resources.AdGroupSimulation\"L\x82\xd3\xe4\x93\x02\x36\x12\x34/v6/{resource_name=customers/*/adGroupSimulations/*}\xda\x41\rresource_name\x1a\x45\xca\x41\x18googleads.googleapis.com\xd2\x41\'https://www.googleapis.com/auth/adwordsB\x84\x02\n$com.google.ads.googleads.v6.servicesB\x1d\x41\x64GroupSimulationServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V6.Services\xca\x02 Google\\Ads\\GoogleAds\\V6\\Services\xea\x02$Google::Ads::GoogleAds::V6::Servicesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_ad__group__simulation__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETADGROUPSIMULATIONREQUEST = _descriptor.Descriptor(
name='GetAdGroupSimulationRequest',
full_name='google.ads.googleads.v6.services.GetAdGroupSimulationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.services.GetAdGroupSimulationRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A,\n*googleads.googleapis.com/AdGroupSimulation', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=280,
serialized_end=384,
)
DESCRIPTOR.message_types_by_name['GetAdGroupSimulationRequest'] = _GETADGROUPSIMULATIONREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAdGroupSimulationRequest = _reflection.GeneratedProtocolMessageType('GetAdGroupSimulationRequest', (_message.Message,), {
'DESCRIPTOR' : _GETADGROUPSIMULATIONREQUEST,
'__module__' : 'google.ads.googleads.v6.services.ad_group_simulation_service_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.GetAdGroupSimulationRequest)
})
_sym_db.RegisterMessage(GetAdGroupSimulationRequest)
DESCRIPTOR._options = None
_GETADGROUPSIMULATIONREQUEST.fields_by_name['resource_name']._options = None
_ADGROUPSIMULATIONSERVICE = _descriptor.ServiceDescriptor(
name='AdGroupSimulationService',
full_name='google.ads.googleads.v6.services.AdGroupSimulationService',
file=DESCRIPTOR,
index=0,
serialized_options=b'\312A\030googleads.googleapis.com\322A\'https://www.googleapis.com/auth/adwords',
create_key=_descriptor._internal_create_key,
serialized_start=387,
serialized_end=704,
methods=[
_descriptor.MethodDescriptor(
name='GetAdGroupSimulation',
full_name='google.ads.googleads.v6.services.AdGroupSimulationService.GetAdGroupSimulation',
index=0,
containing_service=None,
input_type=_GETADGROUPSIMULATIONREQUEST,
output_type=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_ad__group__simulation__pb2._ADGROUPSIMULATION,
serialized_options=b'\202\323\344\223\0026\0224/v6/{resource_name=customers/*/adGroupSimulations/*}\332A\rresource_name',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ADGROUPSIMULATIONSERVICE)
DESCRIPTOR.services_by_name['AdGroupSimulationService'] = _ADGROUPSIMULATIONSERVICE
# @@protoc_insertion_point(module_scope)
|
import numpy as np
from argparse import ArgumentParser
from cufacesearch.imgio.imgio import get_buffer_from_URL
from cufacesearch.featurizer.sbtf_img_featurizer import SentiBankTensorflowImgFeaturizer
from cufacesearch.featurizer.featsio import featB64decode
from cufacesearch.indexer.hbase_indexer_minimal import HBaseIndexerMinimal
from cufacesearch.featurizer.generic_featurizer import test_list_sha1
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-s", "--sha1s", dest="sha1s", default=test_list_sha1)
opts = parser.parse_args()
print opts
list_sha1s = opts.sha1s.split(",")
conf = {
"SBTFIMGFEAT_model_path": "./data/tfdeepsentibank.npy",
"SBTFIMGFEAT_imgmean_path": "./data/imagenet_mean.npy",
"HBI_host": "10.1.94.57",
"HBI_table_sha1infos": "escorts_images_sha1_infos_from_ts"
}
hbi = HBaseIndexerMinimal(conf, prefix="HBI_")
rows = hbi.get_columns_from_sha1_rows(list_sha1s, columns=["info:featnorm_cu","info:s3_url"])
sbtf = SentiBankTensorflowImgFeaturizer(conf)
for row in rows:
feat_hbase_b64 = featB64decode(row[1]["info:featnorm_cu"])
print feat_hbase_b64.shape
img_url = row[1]["info:s3_url"]
img_buffer = get_buffer_from_URL(img_url)
feat = sbtf.featurize(img_buffer)
print feat.shape
norm_feat = np.linalg.norm(feat)
normed_feat = feat/norm_feat
print np.linalg.norm(feat_hbase_b64-normed_feat)
# print np.linalg.norm(feat_hbase_b64)
# print np.linalg.norm(normed_feat)
|
#!/usr/bin/env python
"""OSX specific actions.
Most of these actions share an interface (in/out rdfvalues) with linux actions
of the same name. OSX-only actions are registered with the server via
libs/server_stubs.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import ctypes
import logging
import os
import re
import shutil
import socket
import struct
import sys
import pytsk3
from grr_response_client import actions
from grr_response_client import client_utils_common
from grr_response_client import client_utils_osx
from grr_response_client.client_actions import standard
from grr_response_client.osx import objc
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.parsers import osx_launchd
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.util import precondition
class Error(Exception):
"""Base error class."""
class UnsupportedOSVersionError(Error):
"""This action not supported on this os version."""
# https://github.com/apple/darwin-xnu/blob/master/bsd/sys/_types/_sa_family_t.h
#
# typedef __uint8_t sa_family_t;
sa_family_t = ctypes.c_uint8 # pylint: disable=invalid-name
# https://developer.apple.com/documentation/kernel/in_port_t?language=objc
#
# typedef __uint16_t in_port_t;
in_port_t = ctypes.c_uint16 # pylint: disable=invalid-name
# https://developer.apple.com/documentation/kernel/in_addr_t?language=objc
#
# typedef __uint32_t in_addr_t;
in_addr_t = ctypes.c_uint32 # pylint: disable=invalid-name
# https://github.com/apple/darwin-xnu/blob/master/bsd/netinet6/in6.h
#
# struct in6_addr {
# union {
# __uint8_t __u6_addr8[16];
# __uint16_t __u6_addr16[8];
# __uint32_t __u6_addr32[4];
# } __u6_addr; /* 128-bit IP6 address */
# };
in6_addr_t = ctypes.c_uint8 * 16 # pylint: disable=invalid-name
# https://github.com/apple/darwin-xnu/blob/master/bsd/sys/socket.h
#
# struct sockaddr {
# __uint8_t sa_len; /* total length */
# sa_family_t sa_family; /* [XSI] address family */
# char sa_data[14]; /* [XSI] addr value (actually larger) */
# };
class Sockaddr(ctypes.Structure):
"""The sockaddr structure."""
_fields_ = [
("sa_len", ctypes.c_uint8),
("sa_family", sa_family_t),
("sa_data", ctypes.c_ubyte * 14),
]
# https://github.com/apple/darwin-xnu/blob/master/bsd/net/if_dl.h
#
# struct sockaddr_dl {
# u_char sdl_len; /* Total length of sockaddr */
# u_char sdl_family; /* AF_LINK */
# u_short sdl_index; /* if != 0, system given index for interface */
# u_char sdl_type; /* interface type */
# u_char sdl_nlen; /* interface name length, no trailing 0 reqd. */
# u_char sdl_alen; /* link level address length */
# u_char sdl_slen; /* link layer selector length */
# char sdl_data[12]; /* minimum work area, can be larger;
# contains both if name and ll address */
# };
# Interfaces can have names up to 15 chars long and sdl_data contains name + mac
# but no separators - we need to make sdl_data at least 15+6 bytes.
class Sockaddrdl(ctypes.Structure):
"""The sockaddr_dl struct."""
_fields_ = [
("sdl_len", ctypes.c_ubyte),
("sdl_family", ctypes.c_ubyte),
("sdl_index", ctypes.c_ushort),
("sdl_type", ctypes.c_ubyte),
("sdl_nlen", ctypes.c_ubyte),
("sdl_alen", ctypes.c_ubyte),
("sdl_slen", ctypes.c_ubyte),
("sdl_data", ctypes.c_ubyte * 24),
]
# struct sockaddr_in {
# __uint8_t sin_len;
# sa_family_t sin_family;
# in_port_t sin_port;
# struct in_addr sin_addr;
# char sin_zero[8];
# };
class Sockaddrin(ctypes.Structure):
"""The sockaddr_in struct."""
_fields_ = [
("sin_len", ctypes.c_ubyte),
("sin_family", sa_family_t),
("sin_port", in_port_t),
("sin_addr", in_addr_t),
("sin_zero", ctypes.c_ubyte * 8),
]
# struct sockaddr_in6 {
# __uint8_t sin6_len; /* length of this struct */
# sa_family_t sin6_family; /* AF_INET6 (sa_family_t) */
# in_port_t sin6_port; /* Transport layer port */
# __uint32_t sin6_flowinfo; /* IP6 flow information */
# struct in6_addr sin6_addr; /* IP6 address */
# __uint32_t sin6_scope_id; /* scope zone index */
# };
class Sockaddrin6(ctypes.Structure):
"""The sockaddr_in6 struct."""
_fields_ = [
("sin6_len", ctypes.c_ubyte),
("sin6_family", sa_family_t),
("sin6_port", ctypes.c_ushort),
("sin6_flowinfo", ctypes.c_uint32),
("sin6_addr", in6_addr_t),
("sin6_scope_id", ctypes.c_uint32),
]
# struct ifaddrs *ifa_next; /* Pointer to next struct */
# char *ifa_name; /* Interface name */
# u_int ifa_flags; /* Interface flags */
# struct sockaddr *ifa_addr; /* Interface address */
# struct sockaddr *ifa_netmask; /* Interface netmask */
# struct sockaddr *ifa_broadaddr; /* Interface broadcast address */
# struct sockaddr *ifa_dstaddr; /* P2P interface destination */
# void *ifa_data; /* Address specific data */
class Ifaddrs(ctypes.Structure):
pass
Ifaddrs._fields_ = [ # pylint: disable=protected-access
("ifa_next", ctypes.POINTER(Ifaddrs)),
("ifa_name", ctypes.POINTER(ctypes.c_char)),
("ifa_flags", ctypes.c_uint),
("ifa_addr", ctypes.POINTER(Sockaddr)),
("ifa_netmask", ctypes.POINTER(Sockaddr)),
("ifa_broadaddr", ctypes.POINTER(Sockaddr)),
("ifa_destaddr", ctypes.POINTER(Sockaddr)),
("ifa_data", ctypes.c_void_p),
]
AF_INET = socket.AF_INET
AF_INET6 = socket.AF_INET6
AF_LINK = 0x12
def IterIfaddrs(ifaddrs):
"""Iterates over contents of the intrusive linked list of `ifaddrs`.
Args:
ifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL.
Yields:
Instances of `Ifaddr`.
"""
precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs))
while ifaddrs:
yield ifaddrs.contents
ifaddrs = ifaddrs.contents.ifa_next
def ParseIfaddrs(ifaddrs):
"""Parses contents of the intrusive linked list of `ifaddrs`.
Args:
ifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL.
Returns:
An iterator over instances of `rdf_client_network.Interface`.
"""
precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs))
ifaces = {}
for ifaddr in IterIfaddrs(ifaddrs):
ifname = ctypes.string_at(ifaddr.ifa_name).decode("utf-8")
iface = ifaces.setdefault(ifname, rdf_client_network.Interface())
iface.ifname = ifname
if not ifaddr.ifa_addr:
continue
sockaddr = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddr))
iffamily = sockaddr.contents.sa_family
if iffamily == AF_INET:
sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin))
address = rdf_client_network.NetworkAddress()
address.address_type = rdf_client_network.NetworkAddress.Family.INET
address.packed_bytes = struct.pack("=L", sockaddrin.contents.sin_addr)
iface.addresses.append(address)
elif iffamily == AF_INET6:
sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin6))
address = rdf_client_network.NetworkAddress()
address.address_type = rdf_client_network.NetworkAddress.Family.INET6
address.packed_bytes = bytes(list(sockaddrin.contents.sin6_addr))
iface.addresses.append(address)
elif iffamily == AF_LINK:
sockaddrdl = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrdl))
nlen = sockaddrdl.contents.sdl_nlen
alen = sockaddrdl.contents.sdl_alen
iface.mac_address = bytes(sockaddrdl.contents.sdl_data[nlen:nlen + alen])
else:
raise ValueError("Unexpected socket address family: %s" % iffamily)
return ifaces.values()
def EnumerateInterfacesFromClient(args):
"""Enumerate all MAC addresses."""
del args # Unused
libc = objc.LoadLibrary("c")
ifa = Ifaddrs()
p_ifa = ctypes.pointer(ifa)
libc.getifaddrs(ctypes.pointer(p_ifa))
for iface in ParseIfaddrs(p_ifa):
yield iface
libc.freeifaddrs(p_ifa)
class EnumerateInterfaces(actions.ActionPlugin):
"""Enumerate all MAC addresses of all NICs."""
out_rdfvalues = [rdf_client_network.Interface]
def Run(self, args):
for res in EnumerateInterfacesFromClient(args):
self.SendReply(res)
class GetInstallDate(actions.ActionPlugin):
"""Estimate the install date of this system."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, unused_args):
for f in ["/var/log/CDIS.custom", "/var", "/private"]:
try:
ctime = os.stat(f).st_ctime
self.SendReply(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(ctime))
return
except OSError:
pass
self.SendReply(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
def EnumerateFilesystemsFromClient(args):
"""List all local filesystems mounted on this system."""
del args # Unused.
for fs_struct in client_utils_osx.GetFileSystems():
yield rdf_client_fs.Filesystem(
device=fs_struct.f_mntfromname,
mount_point=fs_struct.f_mntonname,
type=fs_struct.f_fstypename)
drive_re = re.compile("r?disk[0-9].*")
for drive in os.listdir("/dev"):
if not drive_re.match(drive):
continue
path = os.path.join("/dev", drive)
try:
img_inf = pytsk3.Img_Info(path)
# This is a volume or a partition - we send back a TSK device.
yield rdf_client_fs.Filesystem(device=path)
vol_inf = pytsk3.Volume_Info(img_inf)
for volume in vol_inf:
if volume.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
offset = volume.start * vol_inf.info.block_size
yield rdf_client_fs.Filesystem(
device="{path}:{offset}".format(path=path, offset=offset),
type="partition")
except (IOError, RuntimeError):
continue
class EnumerateFilesystems(actions.ActionPlugin):
"""Enumerate all unique filesystems local to the system."""
out_rdfvalues = [rdf_client_fs.Filesystem]
def Run(self, args):
for res in EnumerateFilesystemsFromClient(args):
self.SendReply(res)
def CreateServiceProto(job):
"""Create the Service protobuf.
Args:
    job: Launchd job dictionary from the ServiceManagement framework.
Returns:
sysinfo_pb2.OSXServiceInformation proto
"""
service = rdf_client.OSXServiceInformation(
label=job.get("Label"),
program=job.get("Program"),
sessiontype=job.get("LimitLoadToSessionType"),
ondemand=bool(job["OnDemand"]))
if job["LastExitStatus"] is not None:
service.lastexitstatus = int(job["LastExitStatus"])
if job["TimeOut"] is not None:
service.timeout = int(job["TimeOut"])
for arg in job.get("ProgramArguments", "", stringify=False):
# Returns CFArray of CFStrings
service.args.Append(str(arg))
mach_dict = job.get("MachServices", {}, stringify=False)
for key, value in mach_dict.items():
service.machservice.Append("%s:%s" % (key, value))
job_mach_dict = job.get("PerJobMachServices", {}, stringify=False)
for key, value in job_mach_dict.items():
service.perjobmachservice.Append("%s:%s" % (key, value))
if "PID" in job:
service.pid = job["PID"].value
return service
def GetRunningLaunchDaemons():
"""Get running launchd jobs from objc ServiceManagement framework."""
sm = objc.ServiceManagement()
return sm.SMGetJobDictionaries("kSMDomainSystemLaunchd")
def OSXEnumerateRunningServicesFromClient(args):
"""Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6.
"""
del args # Unused.
osx_version = client_utils_osx.OSXVersion()
version_array = osx_version.VersionAsMajorMinor()
if version_array[:2] < [10, 6]:
raise UnsupportedOSVersionError(
"ServiceManagement API unsupported on < 10.6. This client is %s" %
osx_version.VersionString())
launchd_list = GetRunningLaunchDaemons()
parser = osx_launchd.OSXLaunchdJobDict(launchd_list)
for job in parser.Parse():
response = CreateServiceProto(job)
yield response
class OSXEnumerateRunningServices(actions.ActionPlugin):
"""Enumerate all running launchd jobs."""
in_rdfvalue = None
out_rdfvalues = [rdf_client.OSXServiceInformation]
def Run(self, args):
for res in OSXEnumerateRunningServicesFromClient(args):
self.SendReply(res)
class Uninstall(actions.ActionPlugin):
"""Remove the service that starts us at startup."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, unused_arg):
"""This kills us with no cleanups."""
logging.debug("Disabling service")
msg = "Service disabled."
if hasattr(sys, "frozen"):
grr_binary = os.path.abspath(sys.executable)
elif __file__:
grr_binary = os.path.abspath(__file__)
try:
os.remove(grr_binary)
except OSError:
msg = "Could not remove binary."
try:
os.remove(config.CONFIG["Client.plist_path"])
except OSError:
if "Could not" in msg:
msg += " Could not remove plist file."
else:
msg = "Could not remove plist file."
# Get the directory we are running in from pyinstaller. This is either the
# GRR directory which we should delete (onedir mode) or a generated temp
# directory which we can delete without problems in onefile mode.
directory = getattr(sys, "_MEIPASS", None)
if directory:
shutil.rmtree(directory, ignore_errors=True)
self.SendReply(rdf_protodict.DataBlob(string=msg))
class UpdateAgent(standard.ExecuteBinaryCommand):
"""Updates the GRR agent to a new version."""
def ProcessFile(self, path, args):
cmd = "/usr/sbin/installer"
cmd_args = ["-pkg", path, "-target", "/"]
time_limit = args.time_limit
res = client_utils_common.Execute(
cmd, cmd_args, time_limit=time_limit, bypass_allowlist=True)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
self.SendReply(
rdf_client_action.ExecuteBinaryResponse(
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used)))
|
# nxpy_svn --------------------------------------------------------------------
# Copyright Nicola Musatti 2018 - 2019
# Use, modification, and distribution are subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# See https://github.com/nmusatti/nxpy/tree/master/libs/svn. ------------------
r"""
Packaging information.
"""
import codecs
import os
from setuptools import setup
lib_name = 'nxpy_svn'
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here,'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=lib_name,
version="1.0.3",
author="Nicola Musatti",
author_email="nicola.musatti@gmail.com",
description="Python wrapper for Subversion",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/nmusatti/nxpy",
project_urls={
"Documentation": "https://nxpy.readthedocs.io/en/latest/",
"Source Code": "https://github.com/nmusatti/nxpy",
},
license="Boost Software License 1.0 (BSL-1.0)",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Boost Software License 1.0 (BSL-1.0)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries',
],
namespace_packages=['nxpy'],
packages=['nxpy.svn'],
install_requires=[
'six',
'nxpy_command',
'nxpy_core',
'nxpy_past',
'nxpy_temp_file',
],
)
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from fvcore.common.file_io import PathManager
from PIL import Image, ImageOps
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from . import transforms as T
from .catalog import MetadataCatalog
class SizeMismatchError(ValueError):
"""
    When the loaded image has a different width/height than the annotation.
"""
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
def convert_image_to_rgb(image, format):
"""
Convert an image from given format to RGB.
Args:
image (np.ndarray or Tensor): an HWC image
format (str): the format of input image, also see `read_image`
Returns:
(np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
"""
if isinstance(image, torch.Tensor):
image = image.cpu().numpy()
if format == "BGR":
image = image[:, :, [2, 1, 0]]
elif format == "YUV-BT.601":
image = np.dot(image, np.array(_M_YUV2RGB).T)
image = image * 255.0
else:
if format == "L":
image = image[:, :, 0]
image = image.astype(np.uint8)
image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
return image
def read_image(file_name, format=None):
"""
Read an image into the given format.
Will apply rotation and flipping if the image has such exif information.
Args:
file_name (str): image file path
format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601"
Returns:
image (np.ndarray): an HWC image in the given format, which is 0-255, uint8 for
supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
"""
with PathManager.open(file_name, "rb") as f:
image = Image.open(f)
# capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973
try:
image = ImageOps.exif_transpose(image)
except Exception:
pass
return convert_PIL_to_numpy(image, format)
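# Usage sketch (illustrative): reading an image for a model that expects BGR
# input and converting it back to RGB for visualization. "input.jpg" is a
# placeholder path.
#
#   img = read_image("input.jpg", format="BGR")      # HWC uint8, BGR channels
#   rgb = convert_image_to_rgb(img, format="BGR")    # HWC uint8, RGB channels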
def check_image_size(dataset_dict, image):
"""
Raise an error if the image does not match the size specified in the dict.
"""
if "width" in dataset_dict or "height" in dataset_dict:
image_wh = (image.shape[1], image.shape[0])
expected_wh = (dataset_dict["width"], dataset_dict["height"])
if not image_wh == expected_wh:
raise SizeMismatchError(
"Mismatched (W,H){}, got {}, expect {}".format(
" for image " + dataset_dict["file_name"]
if "file_name" in dataset_dict
else "",
image_wh,
expected_wh,
)
)
# To ensure bbox always remap to original image size
if "width" not in dataset_dict:
dataset_dict["width"] = image.shape[1]
if "height" not in dataset_dict:
dataset_dict["height"] = image.shape[0]
def transform_proposals(dataset_dict, image_shape, transforms, min_box_side_len, proposal_topk):
"""
Apply transformations to the proposals in dataset_dict, if any.
Args:
dataset_dict (dict): a dict read from the dataset, possibly
contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
image_shape (tuple): height, width
transforms (TransformList):
min_box_side_len (int): keep proposals with at least this size
proposal_topk (int): only keep top-K scoring proposals
The input dict is modified in-place, with abovementioned keys removed. A new
key "proposals" will be added. Its value is an `Instances`
object which contains the transformed proposals in its field
"proposal_boxes" and "objectness_logits".
"""
if "proposal_boxes" in dataset_dict:
# Transform proposal boxes
boxes = transforms.apply_box(
BoxMode.convert(
dataset_dict.pop("proposal_boxes"),
dataset_dict.pop("proposal_bbox_mode"),
BoxMode.XYXY_ABS,
)
)
boxes = Boxes(boxes)
objectness_logits = torch.as_tensor(
dataset_dict.pop("proposal_objectness_logits").astype("float32")
)
boxes.clip(image_shape)
keep = boxes.nonempty(threshold=min_box_side_len)
boxes = boxes[keep]
objectness_logits = objectness_logits[keep]
proposals = Instances(image_shape)
proposals.proposal_boxes = boxes[:proposal_topk]
proposals.objectness_logits = objectness_logits[:proposal_topk]
dataset_dict["proposals"] = proposals
def transform_instance_annotations(
annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
"""
Apply transforms to box, segmentation and keypoints annotations of a single instance.
It will use `transforms.apply_box` for the box, and
`transforms.apply_coords` for segmentation polygons & keypoints.
If you need anything more specially designed for each data structure,
you'll need to implement your own version of this function or the transforms.
Args:
annotation (dict): dict of instance annotations for a single instance.
It will be modified in-place.
transforms (TransformList):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
Returns:
dict:
the same input dict with fields "bbox", "segmentation", "keypoints"
transformed according to `transforms`.
The "bbox_mode" field will be set to XYXY_ABS.
"""
bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
# Note that bbox is 1d (per-instance bounding box)
annotation["bbox"] = transforms.apply_box([bbox])[0]
annotation["bbox_mode"] = BoxMode.XYXY_ABS
if "segmentation" in annotation:
# each instance contains 1 or more polygons
segm = annotation["segmentation"]
if isinstance(segm, list):
# polygons
polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
annotation["segmentation"] = [
p.reshape(-1) for p in transforms.apply_polygons(polygons)
]
elif isinstance(segm, dict):
# RLE
mask = mask_util.decode(segm)
mask = transforms.apply_segmentation(mask)
assert tuple(mask.shape[:2]) == image_size
annotation["segmentation"] = mask
else:
raise ValueError(
"Cannot transform segmentation of type '{}'!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict.".format(type(segm))
)
if "keypoints" in annotation:
keypoints = transform_keypoint_annotations(
annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
)
annotation["keypoints"] = keypoints
return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
"""
Transform keypoint annotations of an image.
Args:
keypoints (list[float]): Nx3 float in Detectron2 Dataset format.
transforms (TransformList):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
"""
# (N*3,) -> (N, 3)
keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
keypoints[:, :2] = transforms.apply_coords(keypoints[:, :2])
# This assumes that HorizFlipTransform is the only one that does flip
do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
    # Alternative way: check if probe points were horizontally flipped.
# probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
# probe_aug = transforms.apply_coords(probe.copy())
# do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa
# If flipped, swap each keypoint with its opposite-handed equivalent
if do_hflip:
assert keypoint_hflip_indices is not None
keypoints = keypoints[keypoint_hflip_indices, :]
# Maintain COCO convention that if visibility == 0, then x, y = 0
# TODO may need to reset visibility for cropped keypoints,
# but it does not matter for our existing algorithms
keypoints[keypoints[:, 2] == 0] = 0
return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
Returns:
Instances:
It will contain fields "gt_boxes", "gt_classes",
"gt_masks", "gt_keypoints", if they can be obtained from `annos`.
This is the format that builtin models expect.
"""
boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
target = Instances(image_size)
boxes = target.gt_boxes = Boxes(boxes)
boxes.clip(image_size)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
if len(annos) and "segmentation" in annos[0]:
segms = [obj["segmentation"] for obj in annos]
if mask_format == "polygon":
masks = PolygonMasks(segms)
else:
assert mask_format == "bitmask", mask_format
masks = []
for segm in segms:
if isinstance(segm, list):
# polygon
masks.append(polygons_to_bitmask(segm, *image_size))
elif isinstance(segm, dict):
# COCO RLE
masks.append(mask_util.decode(segm))
elif isinstance(segm, np.ndarray):
assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
segm.ndim
)
# mask array
masks.append(segm)
else:
raise ValueError(
"Cannot convert segmentation of type '{}' to BitMasks!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict, or a full-image segmentation mask "
"as a 2D ndarray.".format(type(segm))
)
# torch.from_numpy does not support array with negative stride.
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
)
target.gt_masks = masks
if len(annos) and "keypoints" in annos[0]:
kpts = [obj.get("keypoints", []) for obj in annos]
target.gt_keypoints = Keypoints(kpts)
return target
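# Usage sketch (illustrative): inside a dataset mapper, per-image annotations are
# usually converted to an `Instances` object and then pruned of degenerate entries.
# `annos` and `image_shape` are assumed to come from the (already transformed)
# dataset dict.
#
#   instances = annotations_to_instances(annos, image_shape, mask_format="polygon")
#   instances = filter_empty_instances(instances)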
def annotations_to_instances_rotated(annos, image_size):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Compared to `annotations_to_instances`, this function is for rotated boxes only
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
Returns:
Instances:
Containing fields "gt_boxes", "gt_classes",
if they can be obtained from `annos`.
This is the format that builtin models expect.
"""
boxes = [obj["bbox"] for obj in annos]
target = Instances(image_size)
boxes = target.gt_boxes = RotatedBoxes(boxes)
boxes.clip(image_size)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
return target
def filter_empty_instances(instances, by_box=True, by_mask=True, box_threshold=1e-5):
"""
Filter out empty instances in an `Instances` object.
Args:
instances (Instances):
by_box (bool): whether to filter out instances with empty boxes
by_mask (bool): whether to filter out instances with empty masks
box_threshold (float): minimum width and height to be considered non-empty
Returns:
Instances: the filtered instances.
"""
assert by_box or by_mask
r = []
if by_box:
r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
if instances.has("gt_masks") and by_mask:
r.append(instances.gt_masks.nonempty())
# TODO: can also filter visible keypoints
if not r:
return instances
m = r[0]
for x in r[1:]:
m = m & x
return instances[m]
def create_keypoint_hflip_indices(dataset_names):
"""
Args:
dataset_names (list[str]): list of dataset names
Returns:
ndarray[int]: a vector of size=#keypoints, storing the
horizontally-flipped keypoint indices.
"""
check_metadata_consistency("keypoint_names", dataset_names)
check_metadata_consistency("keypoint_flip_map", dataset_names)
meta = MetadataCatalog.get(dataset_names[0])
names = meta.keypoint_names
# TODO flip -> hflip
flip_map = dict(meta.keypoint_flip_map)
flip_map.update({v: k for k, v in flip_map.items()})
flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
flip_indices = [names.index(i) for i in flipped_names]
return np.asarray(flip_indices)
def gen_crop_transform_with_instance(crop_size, image_size, instance):
"""
Generate a CropTransform so that the cropping region contains
the center of the given instance.
Args:
crop_size (tuple): h, w in pixels
image_size (tuple): h, w
instance (dict): an annotation dict of one instance, in Detectron2's
dataset format.
"""
crop_size = np.asarray(crop_size, dtype=np.int32)
bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
assert (
image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
), "The annotation bounding box is outside of the image!"
assert (
image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
), "Crop size is larger than image size!"
min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
def check_metadata_consistency(key, dataset_names):
"""
Check that the datasets have consistent metadata.
Args:
key (str): a metadata key
dataset_names (list[str]): a list of dataset names
Raises:
AttributeError: if the key does not exist in the metadata
ValueError: if the given datasets do not have the same metadata values defined by key
"""
if len(dataset_names) == 0:
return
logger = logging.getLogger(__name__)
entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
for idx, entry in enumerate(entries_per_dataset):
if entry != entries_per_dataset[0]:
logger.error(
"Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
)
logger.error(
"Metadata '{}' for dataset '{}' is '{}'".format(
key, dataset_names[0], str(entries_per_dataset[0])
)
)
raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Now it includes resizing and flipping.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(
len(min_size)
)
logger = logging.getLogger(__name__)
tfm_gens = []
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
tfm_gens.append(T.RandomFlip())
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
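# Usage sketch (illustrative): the generators returned above are normally applied
# to an image to obtain both the transformed image and the `TransformList` needed
# by the annotation helpers in this module. `cfg` and `image` are assumed to exist.
#
#   tfm_gens = build_transform_gen(cfg, is_train=True)
#   image, transforms = T.apply_transform_gens(tfm_gens, image)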
|
#MenuTitle: Insert Brace Layers for Movement along Background Path
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Add a single path in the background and it will be used to create intermediate brace layers for OTVar animation.
"""
from Foundation import NSPoint, NSAffineTransform, NSAffineTransformStruct
import math
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def transform(shiftX=0.0, shiftY=0.0, rotate=0.0, skew=0.0, scale=1.0):
"""
Returns an NSAffineTransform object for transforming layers.
Apply an NSAffineTransform t object like this:
Layer.transform_checkForSelection_doComponents_(t,False,True)
Access its transformation matrix like this:
tMatrix = t.transformStruct() # returns the 6-float tuple
Apply the matrix tuple like this:
Layer.applyTransform(tMatrix)
Component.applyTransform(tMatrix)
Path.applyTransform(tMatrix)
Chain multiple NSAffineTransform objects t1, t2 like this:
t1.appendTransform_(t2)
"""
myTransform = NSAffineTransform.transform()
if rotate:
myTransform.rotateByDegrees_(rotate)
if scale != 1.0:
myTransform.scaleBy_(scale)
if not (shiftX == 0.0 and shiftY == 0.0):
myTransform.translateXBy_yBy_(shiftX,shiftY)
if skew:
skewStruct = NSAffineTransformStruct()
skewStruct.m11 = 1.0
skewStruct.m22 = 1.0
skewStruct.m21 = math.tan(math.radians(skew))
skewTransform = NSAffineTransform.transform()
skewTransform.setTransformStruct_(skewStruct)
myTransform.appendTransform_(skewTransform)
return myTransform
def shiftedLayer( originalLayer, shiftTransform ):
shiftedLayer = originalLayer.copy()
shiftedLayer.applyTransform( shiftTransform )
return shiftedLayer
def bezier( P1, P2, P3, P4, t ):
"""
	Returns coordinates for t (=0.0...1.0) on the curve segment.
	P1 (x1,y1) and P4 (x4,y4): coordinates of the on-curve nodes
	P2 (x2,y2) and P3 (x3,y3): coordinates of their BCPs (off-curve handles)
"""
x1, y1 = P1.x, P1.y
x2, y2 = P2.x, P2.y
x3, y3 = P3.x, P3.y
x4, y4 = P4.x, P4.y
x = x1*(1-t)**3 + x2*3*t*(1-t)**2 + x3*3*t**2*(1-t) + x4*t**3
y = y1*(1-t)**3 + y2*3*t*(1-t)**2 + y3*3*t**2*(1-t) + y4*t**3
return NSPoint(x, y)
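# Worked example (illustrative): for P1=(0,0), P2=(0,100), P3=(100,100),
# P4=(100,0) and t=0.5, the cubic above gives
#   x = 0*0.125 + 0*0.375 + 100*0.375 + 100*0.125 = 50
#   y = 0*0.125 + 100*0.375 + 100*0.375 + 0*0.125 = 75
# i.e. bezier(...) returns NSPoint(50, 75), the point at t=0.5 on the curve.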
def process( thisLayer, steps=5 ):
thisGlyph = thisLayer.parent
for i in range(len(thisGlyph.layers))[::-1]:
thisLayer = thisGlyph.layers[i]
if thisLayer.layerId != thisLayer.associatedMasterId:
del thisGlyph.layers[i]
shifts = []
movePath = thisLayer.background.paths[0]
originPoint = movePath.nodes[0]
if movePath:
for thisSegment in movePath.segments:
print(thisSegment)
# curve segments:
if len(thisSegment) == 4:
for i in range(steps):
offsetPoint = bezier(
thisSegment[0].pointValue(),
thisSegment[1].pointValue(),
thisSegment[2].pointValue(),
thisSegment[3].pointValue(),
i*1.0/steps
)
shiftTransform = transform(
shiftX = offsetPoint.x-originPoint.x,
shiftY = offsetPoint.y-originPoint.y
).transformStruct()
shifts.append( shiftTransform )
# line segment:
elif len(thisSegment) == 2:
P1 = thisSegment[0].pointValue()
P2 = thisSegment[1].pointValue()
for i in range(steps):
shiftTransform = transform(
shiftX = (P1.x+i*(P2.x-P1.x)/steps)-originPoint.x,
shiftY = (P1.y+i*(P2.y-P1.y)/steps)-originPoint.y
).transformStruct()
shifts.append( shiftTransform )
# all segments are collected in 'shifts':
print(shifts)
firstMaster = thisLayer.parent.parent.masters[0]
secondMaster = thisLayer.parent.parent.masters[1]
firstMasterValue = firstMaster.weightValue
secondMasterValue = secondMaster.weightValue
frameCount = len(shifts)
stepWidth = (secondMasterValue-firstMasterValue)/frameCount
for i in range(len(shifts)):
frameTransform = shifts[i]
frameValue = firstMasterValue + i * stepWidth
braceLayer = shiftedLayer( thisLayer, frameTransform )
braceLayer.name = "{%i}" % frameValue
thisLayer.parent.layers.append( braceLayer )
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in selectedLayers:
thisGlyph = thisLayer.parent
print("Processing %s" % thisGlyph.name)
thisGlyph.beginUndo() # begin undo grouping
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
|
from policy.models import DcPolicy, DcPolicyHistory
from rest_framework import serializers
class DcPolicyStateHistorySerializer(serializers.ModelSerializer):
created_at = serializers.DateTimeField(format="%d %B, %Y %H:%M:%S")
class Meta:
model = DcPolicyHistory
fields = ('state','created_at')
class DcPolicySerializer(serializers.ModelSerializer):
state_history = serializers.SerializerMethodField(read_only=True)
def get_state_history(self, obj):
policy_history = DcPolicyHistory.objects.filter(policy=obj)
return DcPolicyStateHistorySerializer(policy_history, many=True).data
class Meta:
model = DcPolicy
fields = ('id', 'customer_id', 'type', 'premium','cover', 'state_history',)
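# Usage sketch (illustrative, assuming a saved DcPolicy instance `policy` with
# related DcPolicyHistory rows): serializing yields the policy fields plus the
# nested state history.
#
#   data = DcPolicySerializer(policy).data
#   # e.g. data['state_history'] -> [{'state': ..., 'created_at': '01 January, 2020 12:00:00'}, ...]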
|
'''
Inference code for VisTR
Modified from DETR (https://github.com/facebookresearch/detr)
'''
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
import torchvision.transforms as T
import matplotlib.pyplot as plt
import os
from PIL import Image
import math
import torch.nn.functional as F
import json
from scipy.optimize import linear_sum_assignment
import pycocotools.mask as mask_util
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=150, type=int)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--model_path', type=str, default=None,
help="Path to the model weights.")
# * Backbone
parser.add_argument('--backbone', default='resnet101', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=384, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_frames', default=36, type=int,
help="Number of frames")
parser.add_argument('--num_ins', default=10, type=int,
help="Number of instances")
parser.add_argument('--num_queries', default=360, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--img_path', default='data/ytvos/valid/JPEGImages/')
parser.add_argument('--ann_path', default='data/ytvos/annotations/instances_val_sub.json')
parser.add_argument('--save_path', default='results.json')
parser.add_argument('--dataset_file', default='ytvos')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='output_ytvos',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
#parser.add_argument('--eval', action='store_true')
parser.add_argument('--eval', action='store_false')
parser.add_argument('--num_workers', default=0, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
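# Usage sketch (illustrative): following the DETR convention, the parser above is
# meant to be used as a parent parser, e.g.
#
#   parser = argparse.ArgumentParser('VisTR inference script', parents=[get_args_parser()])
#   args = parser.parse_args()
#   main(args)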
CLASSES=['person','giant_panda','lizard','parrot','skateboard','sedan','ape',
'dog','snake','monkey','hand','rabbit','duck','cat','cow','fish',
'train','horse','turtle','bear','motorbike','giraffe','leopard',
'fox','deer','owl','surfboard','airplane','truck','zebra','tiger',
'elephant','snowboard','boat','shark','mouse','frog','eagle','earless_seal',
'tennis_racket']
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
[0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933],
[0.494, 0.000, 0.556], [0.494, 0.000, 0.000], [0.000, 0.745, 0.000],
[0.700, 0.300, 0.600]]
transform = T.Compose([
T.Resize(300),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(out_bbox, size):
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
b = b.cpu() * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
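# A tiny worked example of the two helpers above (hypothetical values, kept as a
# comment so it is not executed on import): a normalized (cx, cy, w, h) box of
# (0.5, 0.5, 0.2, 0.4) on a 100x200 (w x h) image becomes the corner box
# (40, 60, 60, 140):
#   boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
#   rescale_bboxes(boxes, (100, 200))   # -> tensor([[40., 60., 60., 140.]])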
def main(args):
# Test
start_time = time.time()
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
num_frames = args.num_frames
num_ins = args.num_ins
with torch.no_grad():
model, criterion, postprocessors = build_model(args)
model.to(device)
state_dict = torch.load(args.model_path)['model']
model.load_state_dict(state_dict)
folder = args.img_path
videos = json.load(open(args.ann_path,'rb'))['videos']
vis_num = len(videos)
# Test
process_start_time = time.time()
inference_time_acc = 0.0
frame_count = 0
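        # NOTE: the line below overrides the full video count, so only the first
        # 10 videos from the annotation file are processed.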
vis_num = 10
result = []
for i in range(vis_num):
print("Process video: ",i)
id_ = videos[i]['id']
length = videos[i]['length']
file_names = videos[i]['file_names']
clip_num = math.ceil(length/num_frames)
img_set=[]
if length<num_frames:
clip_names = file_names*(math.ceil(num_frames/length))
clip_names = clip_names[:num_frames]
else:
clip_names = file_names[:num_frames]
if len(clip_names)==0:
continue
if len(clip_names)<num_frames:
clip_names.extend(file_names[:num_frames-len(clip_names)])
for k in range(num_frames):
im = Image.open(os.path.join(folder,clip_names[k]))
img_set.append(transform(im).unsqueeze(0).cuda())
img=torch.cat(img_set,0)
# Test
frame_count += len(img_set)
inference_start_time = time.time()
# inference time is calculated for this operation
outputs = model(img)
inference_time_acc += time.time() - inference_start_time
# end of model inference
            logits, boxes, masks = (outputs['pred_logits'].softmax(-1)[0, :, :-1],
                                    outputs['pred_boxes'][0], outputs['pred_masks'][0])
            pred_masks = F.interpolate(masks.reshape(num_frames, num_ins, masks.shape[-2], masks.shape[-1]),
                                       (im.size[1], im.size[0]), mode="bilinear").sigmoid().cpu().detach().numpy() > 0.5
            pred_logits = logits.reshape(num_frames, num_ins, logits.shape[-1]).cpu().detach().numpy()
pred_masks = pred_masks[:length]
pred_logits = pred_logits[:length]
pred_scores = np.max(pred_logits,axis=-1)
pred_logits = np.argmax(pred_logits,axis=-1)
for m in range(num_ins):
if pred_masks[:,m].max()==0:
continue
score = pred_scores[:,m].mean()
#category_id = pred_logits[:,m][pred_scores[:,m].argmax()]
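                # majority vote: pick the most frequently predicted class across frames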
category_id = np.argmax(np.bincount(pred_logits[:,m]))
instance = {'video_id':id_, 'score':float(score), 'category_id':int(category_id)}
segmentation = []
for n in range(length):
if pred_scores[n,m]<0.001:
segmentation.append(None)
else:
mask = (pred_masks[n,m]).astype(np.uint8)
rle = mask_util.encode(np.array(mask[:,:,np.newaxis], order='F'))[0]
rle["counts"] = rle["counts"].decode("utf-8")
segmentation.append(rle)
instance['segmentations'] = segmentation
result.append(instance)
# Test
print('Inference time: ', inference_time_acc)
print('Frame count: ', frame_count)
print('Inference time per frame: ', inference_time_acc / frame_count)
print('Process time (include image read, copy to cuda, but not model build): ', time.time() - process_start_time)
with open(args.save_path, 'w', encoding='utf-8') as f:
json.dump(result,f)
# Test
print('Total runtime (model build + inference + image read + copy to cuda ...): ', time.time() - start_time)
if __name__ == '__main__':
parser = argparse.ArgumentParser('VisTR inference script', parents=[get_args_parser()])
args = parser.parse_args()
main(args)
|
from collections import OrderedDict
from unittest import TestCase
from pyjsonnlp import validation
from flairjsonnlp import FlairPipeline
from . import mocks
import pytest
text = "Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash."
def strip_scores(j):
"""Scores are non-deterministic"""
for s in j['documents'][1]['sentences']:
for label in s.get('labels', []):
label['scores']['label'] = 0
for e in j['documents'][1]['expressions']:
e['scores']['type'] = 0
for t in j['documents'][1]['tokenList']:
t['scores']['upos'] = 0
t['scores']['xpos'] = 0
t['scores']['entity'] = 0
if 'synsets' in t:
t['synsets'][0]['scores']['wordnetId'] = 0
class TestFlair(TestCase):
def test_process(self):
actual = FlairPipeline().process(text, fast=False, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 
'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VB', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'crash.v.01', 'scores': {'wordnetId': 0}}]}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_fast(self):
actual = FlairPipeline().process(text, fast=True, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 
'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': '.', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_ontonotes(self):
actual = FlairPipeline().process(text, fast=True, use_ontonotes=True)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-GPE', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 
'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VB', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_multi(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'multi'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'entity': 0, 'xpos': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'entity_iob': 'O'}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': 
{'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'entity_iob': 'O'}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [])])])])
# assert actual == expected, actual
def test_invalid_language(self):
with pytest.raises(TypeError):
FlairPipeline().process(text, lang='martian')
def test_validation(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en'))
class TestFlairEmbeddings(TestCase):
def test_no_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=False, bpe_size=0)
assert all(map(lambda t: 'embeddings' not in t, actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_default_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='default', char_embeddings=False, bpe_size=0)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair glove,multi-forward,multi-backward',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_character_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=True, bpe_size=0)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair ,char',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_bpe(self):
actual = FlairPipeline().process(text, lang='en', fast=True, use_embeddings='', char_embeddings=False, bpe_size=50)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair ,byte-pair_50',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=False, bpe_size=50)
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='en', bpe_size=45)
def test_invalid(self):
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='martian', char_embeddings=False, bpe_size=0)
def test_validation_default(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', use_embeddings='default'))
def test_validation_bpe(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', bpe_size=50))
def test_validation_chars(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', char_embeddings=True))
|
"""
`digitalio`
========================================================
Copyright 2020 Alorium Technology. All rights reserved.
Contact: info@aloriumtech.com
Description:
This file is part of the Alorium Technology CircuitPython Library Bundle
and provides a custom CircuitPython digitalio library for Evo M51.
"""
import struct
from aloriumtech import _evo
import digitalio
from digitalio import Direction, Pull, DriveMode
class DigitalInOut:
"""Digital input and output
A DigitalInOut is used to digitally control I/O pins. For analog control of
a pin, see the :py:class:`analogio.AnalogIn` and
:py:class:`analogio.AnalogOut` classes."""
_buffer = bytearray(4)
def __init__(self, pin):
"""Create a new DigitalInOut object associated with the pin. Defaults to input
with no pull. Use :py:meth:`switch_to_input` and
:py:meth:`switch_to_output` to change the direction.
:param ~microcontroller.Pin pin: The pin to control"""
self._mask = pin[0]
self._id = pin[1]
self._pin = None
self._direction = None
data = 1 << self._mask
struct.pack_into("<I", self._buffer, 0, data)
if pin[1] == 3:
pass
elif pin[1] == 2:
pass
elif pin[1] == 1:
pass
else:
addr = _evo.D2F_ENSET_ADDR
self._pin = digitalio.DigitalInOut(self._id)
_evo.send_evo_write_trans(addr, self._buffer)
    def deinit(self) -> None:
        """Turn off the DigitalInOut and release the pin for other use."""
        # ``pin`` is not in scope here; use the mask captured in __init__
        data = 1 << self._mask
#data = struct.pack("<I", data)
struct.pack_into("<I", self._buffer, 0, data)
if self._pin == None:
pass
else:
addr = _evo.D2F_ENCLR_ADDR
_evo.send_evo_write_trans(addr, self._buffer)
def soft_deinit(self) -> None:
"""Release the SAMD pin control, but do not reset the FPGA. Useful for configuring a pin for use with an existing library."""
if self._pin == None:
pass
else:
self._pin.deinit()
    def __enter__(self) -> "DigitalInOut":
        """No-op used by Context Managers."""
        return self
def __exit__(self,) -> None:
"""Automatically deinitializes the hardware when exiting a context. See
:ref:`lifetime-and-contextmanagers` for more info."""
self.deinit()
def switch_to_output(
self,
value: bool = False,
drive_mode: DriveMode = DriveMode.PUSH_PULL,
#drive_mode: digitalio.DriveMode = digitalio.DriveMode.PUSH_PULL,
) -> None:
"""Set the drive mode and value and then switch to writing out digital
values.
:param bool value: default value to set upon switching
:param ~digitalio.DriveMode drive_mode: drive mode for the output
"""
data = 1 << self._mask
struct.pack_into("<I", self._buffer, 0, data)
if self._id == 3:
addr = _evo.PORT_Z_DIRSET_ADDR
elif self._id == 2:
addr = _evo.PORT_G_DIRSET_ADDR
elif self._id == 1:
addr = _evo.PORT_E_DIRSET_ADDR
else:
addr = _evo.D2F_DIRSET_ADDR
_evo.send_evo_write_trans(addr, self._buffer)
if addr == _evo.D2F_DIRSET_ADDR:
self._pin.switch_to_output(value, drive_mode)
# If we subclassed this would be:
# super().switch_to_output(value, drive_mode)
def switch_to_input(self, pull: Pull = None) -> None:
"""Set the pull and then switch to read in digital values.
:param Pull pull: pull configuration for the input
Example usage::
import digitalio
import board
switch = digitalio.DigitalInOut(board.SLIDE_SWITCH)
switch.switch_to_input(pull=digitalio.Pull.UP)
# Or, after switch_to_input
switch.pull = digitalio.Pull.UP
print(switch.value)"""
data = 1 << self._mask
struct.pack_into("<I", self._buffer, 0, data)
if self._id == 3:
addr = _evo.PORT_Z_DIRCLR_ADDR
elif self._id == 2:
addr = _evo.PORT_G_DIRCLR_ADDR
elif self._id == 1:
addr = _evo.PORT_E_DIRCLR_ADDR
else:
addr = _evo.D2F_DIRCLR_ADDR
_evo.send_evo_write_trans(addr, self._buffer)
        # mirror switch_to_output: forward to the SAMD pin only for D2F pins
        if addr == _evo.D2F_DIRCLR_ADDR:
            self._pin.switch_to_input(pull)
@property
def value(self):
if self._pin != None:
return self._pin.value
else:
data = 1 << self._mask
if self._id == 3:
addr = _evo.PORT_Z_IN_ADDR
elif self._id == 2:
addr = _evo.PORT_G_IN_ADDR
elif self._id == 1:
addr = _evo.PORT_E_IN_ADDR
result = _evo.send_evo_read_trans(addr)
result = struct.unpack("<I", result)
result = result[0] & data
if result > 0:
return True
else:
return False
@value.setter
def value(self, value):
if self._pin != None:
self._pin.value = value
else:
data = 1 << self._mask
struct.pack_into("<I", self._buffer, 0, data)
if value == True:
if self._id == 3:
addr = _evo.PORT_Z_OUTSET_ADDR
elif self._id == 2:
addr = _evo.PORT_G_OUTSET_ADDR
elif self._id == 1:
addr = _evo.PORT_E_OUTSET_ADDR
else:
if self._id == 3:
addr = _evo.PORT_Z_OUTCLR_ADDR
elif self._id == 2:
addr = _evo.PORT_G_OUTCLR_ADDR
elif self._id == 1:
addr = _evo.PORT_E_OUTCLR_ADDR
_evo.send_evo_write_trans(addr, self._buffer)
@property
def direction(self):
return self._direction
# direction: Direction = ...
# """The direction of the pin.
# Setting this will use the defaults from the corresponding
# :py:meth:`switch_to_input` or :py:meth:`switch_to_output` method. If
# you want to set pull, value or drive mode prior to switching, then use
# those methods instead."""
    @direction.setter
    def direction(self, direction: Direction = Direction.INPUT):
        self._direction = direction
if self._pin != None:
self._pin.direction = direction
if direction == Direction.OUTPUT:
self.switch_to_output(None)
elif direction == Direction.INPUT:
self.switch_to_input(None)
else:
pass
# DriveMode is not available on Evo, simply override and cause an error
# drive_mode: DriveMode = ...
# """The pin drive mode. One of:
# - `digitalio.DriveMode.PUSH_PULL`
# - `digitalio.DriveMode.OPEN_DRAIN`"""
    @property
    def drive_mode(self):
        raise TypeError("FPGA pins do not allow for DriveMode functionality.")
    @drive_mode.setter
    def drive_mode(self, drive_mode: DriveMode = DriveMode.PUSH_PULL):
        raise TypeError("FPGA pins do not allow for DriveMode functionality.")
# Pull is not available on Evo, simply override and cause an error
# pull: Optional[Pull] = ...
# """The pin pull direction. One of:
# - `digitalio.Pull.UP`
# - `digitalio.Pull.DOWN`
# - `None`
# :raises AttributeError: if `direction` is :py:data:`~digitalio.Direction.OUTPUT`."""
    @property
    def pull(self):
        raise TypeError("FPGA pins do not allow for Pull functionality.")
    @pull.setter
    def pull(self, pull: Pull = Pull.UP):
        raise TypeError("FPGA pins do not allow for Pull functionality.")
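# A minimal usage sketch (illustrative only; the import path and the board pin
# name below are assumptions, but DigitalInOut, switch_to_output, value and
# deinit are the methods defined above):
#   from aloriumtech import board, digitalio
#   led = digitalio.DigitalInOut(board.D13)
#   led.switch_to_output(value=False)
#   led.value = True    # drives the pin high through the FPGA/SAMD path above
#   led.deinit()        # releases the pin again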
|
import os, traceback, atexit, logging
import iglesia
from iglesia.utils import message, warning, error
# these are set up in init
ROOTDIR = None
# SERVER_BASEDIR is set up in iglesia (as e.g. /home/user/path)
SHADOW_URL_PREFIX = None # URL prefix for HTTP server serving shadow tree (e.g. http://localhost:port/{SESSION_ID})
FILE_URL_ROOT = None # root URL for accessing files through Jupyter (e.g. /files/to)
NOTEBOOK_URL_ROOT = None # root URL for accessing notebooks through Jupyter (e.g. /notebooks/to)
CACHE_URL_BASE = None # base URL for cache, e.g. http://localhost:port/{SESSION_ID}/home/user/path
CACHE_URL_ROOT = None # URL for cache of root dir, e.g. http://localhost:port/{SESSION_ID}/home/user/path/to
NBCONVERT = None # set to True if running in notebook-convert mode (i.e. non-interactive)
casacore_tables = None
class PadreLogHandler(logging.Handler):
def __init__(self):
super(PadreLogHandler, self).__init__()
self.records = []
def emit(self, record):
self.records.append(record)
def get_records(self, min_level=logging.INFO):
"""Returns accumulated records from the specified level (or higher)"""
if type(min_level) is str:
min_level = getattr(logging, min_level)
return [(logging.getLevelName(rec.levelno), rec.msg) for rec in self.records if rec.levelno >= min_level]
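# Example: log_handler.get_records("WARNING") returns a list of
# (level_name, message) tuples for every accumulated record at WARNING or above.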
log_handler = PadreLogHandler()
def _strip_slash(path):
return path if path == "/" or path is None else path.rstrip("/")
def _is_subdir(subdir, parent):
return subdir == parent or subdir.startswith(parent+"/")
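# e.g. _is_subdir("/home/user/path/to", "/home/user/path") -> True,
#      _is_subdir("/home/user/pathology", "/home/user/path") -> False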
def _make_symlink(source, link_name):
try:
if os.path.lexists(link_name):
if os.path.exists(link_name) and os.path.samefile(link_name, source):
return
else:
os.unlink(link_name)
os.symlink(source, link_name)
except Exception as exc:
traceback.print_exc()
raise
def init():
"""Initializes radiopadre kernel"""
iglesia.init()
global FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \
SHADOW_URL_PREFIX
global \
ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, SHADOW_BASEDIR, \
SHADOW_ROOTDIR, SESSION_DIR, SESSION_URL, SESSION_ID, \
VERBOSE, HOSTNAME, SNOOP_MODE
from iglesia import \
ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, SHADOW_BASEDIR, \
SHADOW_ROOTDIR, SESSION_DIR, SESSION_URL, SESSION_ID, \
VERBOSE, HOSTNAME, SNOOP_MODE
# setup for snoop mode. Browsing /home/other/path/to,
if SNOOP_MODE:
# for a Jupyter basedir of ~/.radiopadre/home/other/path, this becomes /home/other/path
unshadowed_server_base = SERVER_BASEDIR[len(SHADOW_HOME):]
# Otherwise it'd better have been /home/other/path/to to begin with!
if not _is_subdir(ABSROOTDIR, unshadowed_server_base):
error(f"""The requested directory {ABSROOTDIR} is not under {unshadowed_server_base}.
This is probably a bug! """)
# Since Jupyter is running under ~/.radiopadre/home/other/path, we can serve other's files from
# /home/other/path/to as /files/to/.content
subdir = SHADOW_ROOTDIR[len(SERVER_BASEDIR):] # this becomes "/to" (or "" if paths are the same)
# but do make sure that the .content symlink is in place!
_make_symlink(ABSROOTDIR, SHADOW_ROOTDIR + "/.radiopadre.content")
# else running in native mode
else:
if not _is_subdir(ABSROOTDIR, SERVER_BASEDIR):
warning(f"""The requested directory {ABSROOTDIR} is not under {SERVER_BASEDIR}.
This is probably a bug! """)
# for a server dir of /home/user/path, and an ABSROOTDIR of /home/oms/path/to, get the subdir
subdir = ABSROOTDIR[len(SERVER_BASEDIR):] # this becomes "/to" (or "" if paths are the same)
os.chdir(ABSROOTDIR)
ROOTDIR = '.'
## check casacore availability
global casacore_tables
try:
import casacore.tables as casacore_tables
except Exception as exc:
casacore_tables = None
warning("casacore.tables failed to import. Table browsing functionality will not be available.")
radiopadre_base = os.path.dirname(os.path.dirname(__file__))
# # pre-init JS9 stuff and run JS9 helper
# js9.preinit_js9(in_container, helper_port, userside_helper_port, http_rewrites)
iglesia.init_helpers(radiopadre_base)
# now a port is available (set up in init_helpers()), form up URLs
SHADOW_URL_PREFIX = f"http://localhost:{iglesia.HTTPSERVER_PORT}/{SESSION_ID}"
CACHE_URL_ROOT = SHADOW_URL_PREFIX + ABSROOTDIR
CACHE_URL_BASE = CACHE_URL_ROOT[:-len(subdir)] if subdir else CACHE_URL_ROOT
# when running nbconvert, it doesn't know about the magic "/files" URL, and just needs a local filename
global NBCONVERT
NBCONVERT = bool(os.environ.get("RADIOPADRE_NBCONVERT"))
files_prefix = "." if NBCONVERT else "/files"
if SNOOP_MODE:
FILE_URL_ROOT = f"{files_prefix}{subdir}/.radiopadre.content/"
NOTEBOOK_URL_ROOT = f"/notebooks{subdir}/.radiopadre.content/"
else:
FILE_URL_ROOT = f"{files_prefix}{subdir}/"
NOTEBOOK_URL_ROOT = f"/notebooks{subdir}/"
# init JS9 sources
from . import js9
js9.preinit_js9()
if ROOTDIR is None:
from iglesia import logger
# enable logging
log = logger.init("radiopadre.kernel") #, use_formatter=False)
log.setLevel(logging.DEBUG)
log.addHandler(log_handler)
LOGFILE = logger.enable_logfile("kernel")
logger.disable_printing()
message("initializing radiopadre_kernel")
init()
|
import cnode
Service = cnode.cleinit()
#define input point class
WhenAnyPointClass = Service.CNodePointBase('WhenAnyPointClass')
@WhenAnyPointClass._RegScriptProc_P('OnExecute')
def OnExecute(self,CNodeSet,CNode) :
node_active_flag = self.GetSourceActiveFlag()
for flag in node_active_flag :
if flag == True :
return 1.0
return 0.0
#define input point class
WhenAllPointClass = Service.CNodePointBase('WhenAllPointClass')
@WhenAllPointClass._RegScriptProc_P('OnExecute')
def OnExecute(self,CNodeSet,CNode) :
node_active_flag = self.GetSourceActiveFlag()
for flag in node_active_flag :
if flag == False :
return 0.0
return 1.0
#define node output point class
GeneralOutputClass = Service.CNodePointBase('GeneralOutputClass')
@GeneralOutputClass._RegScriptProc_P('OnExecute')
def OnExecute(self,CNodeSet,CNode) :
node_active_flag = self.GetSourceActiveFlag()
node_negativeflag = self.GetSourceNegativeFlag()
active = False
for i in range(0,len(node_active_flag)) :
if node_active_flag[i] == True and node_negativeflag[i] == False :
active = True
break
if active == True :
#conflict ?
for i in range(0,len(node_active_flag)) :
if node_active_flag[i] == True and node_negativeflag[i] == True :
#conflict, fire the callback
nodeset = self.GetNodeSet()
nodeset.ExecuteIssue(self,cnode.EXECUTE_CONFLICT)
break
if active == True :
return 1.0
return 0.0
#define proc input define class
ProcInputDefineClass = Service.CNodePointBase('ProcInputDefineClass')
@ProcInputDefineClass._RegScriptProc_P('OnExecute')
def OnExecute(self,CNodeSet,CNode) :
pass
|
#! /usr/bin/env python
'''test_mandelbrot_2
Test the Numba compiler on several variants of Mandelbrot set membership
computations.
'''
from numba import *
import unittest
import numpy as np
from numba.testing import test_support
def mandel_1(real_coord, imag_coord, max_iters):
'''Given a the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
Inspired by code at http://wiki.cython.org/examples/mandelbrot
'''
# Ideally we'd want to use a for loop, but we'll need to be able
# to detect and desugar for loops over range/xrange/arange first.
i = 0
z_real = 0.
z_imag = 0.
while i < max_iters:
z_real_n = z_real * z_real - z_imag * z_imag + real_coord
z_imag = 2. * z_real * z_imag + imag_coord
z_real = z_real_n
if (z_real * z_real + z_imag * z_imag) >= 4:
return i
i += 1
return -1
mandel_1c = jit('i4(f8,f8,i4)')(mandel_1)
def mandel_driver_1(min_x, max_x, min_y, nb_iterations, colors, image):
nb_colors = len(colors)
width = image.shape[0]
height = image.shape[1]
pixel_size = (max_x - min_x) / width
for x in range(width):
real = min_x + x * pixel_size
for y in range(height):
imag = min_y + y * pixel_size
# For the following to actually compile, mandel_1 must
# have already been compiled.
color = mandel_1(real, imag, nb_iterations)
# Would prefer the following, just to show off:
# image[x, y, :] = colors[color % nb_colors]
# But that'd require Numba to handle slicing (it doesn't
            # at the time this version was written), and it wouldn't
            # have the type information about the shape.
            col_index = color % nb_colors # Oh, for want of CSE...
image[x, y, 0] = colors[col_index, 0]
image[x, y, 1] = colors[col_index, 1]
image[x, y, 2] = colors[col_index, 2]
mandel_driver_1c = jit('void(f8,f8,f8,i4,u1[:,:],u1[:,:,:])')(
mandel_driver_1)
def make_palette():
'''Shamefully stolen from
http://wiki.cython.org/examples/mandelbrot, though we did correct
their spelling mistakes (*smirk*).'''
colors = []
for i in range(0, 25):
colors.append( (i*10, i*8, 50 + i*8), )
for i in range(25, 5, -1):
colors.append( (50 + i*8, 150+i*2, i*10), )
for i in range(10, 2, -1):
colors.append( (0, i*15, 48), )
return np.array(colors, dtype=np.uint8)
def mandel_2(x, max_iterations):
z = complex(0)
for i in range(max_iterations):
z = z**2 + x
if abs(z) >= 2:
return i
return -1
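# e.g. mandel_2(0j, 20) returns -1 (the orbit of 0 stays bounded), while
# mandel_2(1+1j, 20) returns 1 (the escape is detected at iteration i == 1).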
mandel_2c = jit(i4(c16,i4))(mandel_2)
def mandel_driver_2(min_x, max_x, min_y, nb_iterations, colors, image):
nb_colors = len(colors)
width = image.shape[0]
height = image.shape[1]
pixel_size = (max_x - min_x) / width
dy = pixel_size * 1j
for x in range(width):
coord = complex(min_x + x * pixel_size, min_y)
for y in range(height):
color = mandel_2(coord, nb_iterations)
image[x,y,:] = colors[color % nb_colors,:]
coord += dy
mandel_driver_2c = jit(void(f8,f8,f8,i4,u1[:,:],u1[:,:,:]))(mandel_driver_2)
def benchmark(dx = 500, dy = 500):
import time
min_x = -1.5
max_x = 0
min_y = -1.5
colors = make_palette()
nb_iterations = colors.shape[0]
img0 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_1(min_x, max_x, min_y, nb_iterations, colors, img0)
dt0 = time.time() - start
img1 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_1c(min_x, max_x, min_y, nb_iterations, colors, img1)
dt1 = time.time() - start
img2 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_2(min_x, max_x, min_y, nb_iterations, colors, img2)
dt2 = time.time() - start
img3 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_2c(min_x, max_x, min_y, nb_iterations, colors, img3)
dt3 = time.time() - start
return (dt0, dt1, dt2, dt3), (img0, img1, img2, img3)
class TestMandelbrot(unittest.TestCase):
def test_mandel_1_sanity(self):
self.assertEqual(mandel_1c(0., 0., 20), -1)
def test_mandel_1(self):
vals = np.arange(-1., 1.000001, 0.1)
for real in vals:
for imag in vals:
self.assertEqual(mandel_1(real, imag, 20),
mandel_1c(real, imag, 20))
def test_mandel_driver_1(self):
palette = make_palette()
control_image = np.zeros((50, 50, 3), dtype = np.uint8)
mandel_driver_1(-1., 1., -1., len(palette), palette, control_image)
test_image = np.zeros_like(control_image)
self.assertTrue((control_image - test_image == control_image).all())
mandel_driver_1c(-1., 1., -1., len(palette), palette, test_image)
image_diff = control_image - test_image
self.assertTrue((image_diff == 0).all())
def test_mandel_driver_2(self):
palette = make_palette()
control_image = np.zeros((50, 50, 3), dtype = np.uint8)
mandel_driver_2(-1., 1., -1., len(palette), palette, control_image)
test_image = np.zeros_like(control_image)
self.assertTrue((control_image - test_image == control_image).all())
mandel_driver_2c(-1., 1., -1., len(palette), palette, test_image)
image_diff = control_image - test_image
self.assertTrue((image_diff == 0).all())
if __name__ == "__main__":
test_support.main()
|
import os
class Config(object):
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = os.environ['DEBUG']
MONGODB_HOST = os.environ['MONGODB_HOST']
MONGODB_PORT = int(os.environ['MONGODB_PORT'])
MONGODB_DB = os.environ['MONGODB_DATABASE']
MONGODB_USERNAME = os.environ['MONGODB_USER']
MONGODB_PASSWORD = os.environ['MONGODB_PASS']
|
"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
  (null)
    |
    | HTTPConnection()
    v
  Idle
    |
    | putrequest()
    v
  Request-started
    |
    | ( putheader() )*  endheaders()
    v
  Request-sent
    |
    | response = getresponse()
    v
  Unread-response   [Response-headers-read]
    |\____________________
    |                     |
    | response.read()     | putrequest()
    v                     v
  Idle                  Req-started-unread-response
                   ______/|
                 /        |
 response.read() |        | ( putheader() )*  endheaders()
                 v        v
     Request-started    Req-sent-unread-response
                          |
                          | response.read()
                          v
                        Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State                  __state            __response
-------------                  -------            ----------
Idle                           _CS_IDLE           None
Request-started                _CS_REQ_STARTED    None
Request-sent                   _CS_REQ_SENT       None
Unread-response                _CS_IDLE           <response_class>
Req-started-unread-response    _CS_REQ_STARTED    <response_class>
Req-sent-unread-response       _CS_REQ_SENT       <response_class>
"""
import email.parser
import email.message
import io
import os
import socket
import collections
from urllib.parse import urlsplit
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
PRECONDITION_REQUIRED = 428
TOO_MANY_REQUESTS = 429
REQUEST_HEADER_FIELDS_TOO_LARGE = 431
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
NETWORK_AUTHENTICATION_REQUIRED = 511
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
428: 'Precondition Required',
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
511: 'Network Authentication Required',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
class HTTPMessage(email.message.Message):
# XXX The only usage of this method is in
# http.server.CGIHTTPRequestHandler. Maybe move the code there so
# that it doesn't need to be part of the public API. The API has
# never been defined so this could cause backwards compatibility
# issues.
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp, _class=HTTPMessage):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
headers = []
while True:
line = fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
headers.append(line)
if len(headers) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=_class).parsestr(hstring)
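# A quick sketch of what parse_headers() consumes and returns (illustrative
# bytes only):
#   import io
#   raw = b"Host: example.com\r\nContent-Length: 0\r\n\r\n"
#   msg = parse_headers(io.BytesIO(raw))
#   msg["Host"]   # -> 'example.com'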
class HTTPResponse(io.BufferedIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr in self.headers:
print("header:", hdr, end=" ")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.headers.get("connection")
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
super().close() # set "closed" flag
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return b""
if self._method == "HEAD":
self._close_conn()
return b""
if amt is not None:
# Amount is given, implement using readinto
b = bytearray(amt)
n = self.readinto(b)
return memoryview(b)[:n].tobytes()
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return s
def readinto(self, b):
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _get_chunk_left(self):
# return self.chunk_left, reading a new chunk if necessary.
# chunk_left == 0: at the end of the current chunk, need to close it
# chunk_left == None: No current chunk, should read next.
# This function returns non-zero or None if the last chunk has
# been read.
chunk_left = self.chunk_left
if not chunk_left: # Can be 0 or None
if chunk_left is not None:
                # We are at the end of a chunk; discard the chunk terminator
self._safe_read(2) # toss the CRLF at the end of the chunk
try:
chunk_left = self._read_next_chunk_size()
except ValueError:
raise IncompleteRead(b'')
if chunk_left == 0:
# last chunk: 1*("0") [ chunk-extension ] CRLF
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
chunk_left = None
self.chunk_left = chunk_left
return chunk_left
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
value = []
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
break
value.append(self._safe_read(chunk_left))
self.chunk_left = 0
return b''.join(value)
except IncompleteRead:
raise IncompleteRead(b''.join(value))
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
total_bytes = 0
mvb = memoryview(b)
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
return total_bytes
if len(mvb) <= chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
temp_mvb = mvb[:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
self.chunk_left = 0
except IncompleteRead:
raise IncompleteRead(bytes(b[0:total_bytes]))
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(b''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return b"".join(s)
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
total_bytes = 0
mvb = memoryview(b)
while total_bytes < len(b):
if MAXAMOUNT < len(mvb):
temp_mvb = mvb[0:MAXAMOUNT]
n = self.fp.readinto(temp_mvb)
else:
n = self.fp.readinto(mvb)
if not n:
raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
mvb = mvb[n:]
total_bytes += n
return total_bytes
def read1(self, n=-1):
"""Read with at most one underlying system call. If at least one
byte is buffered, return that instead.
"""
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._read1_chunked(n)
try:
result = self.fp.read1(n)
except ValueError:
if n >= 0:
raise
# some implementations, like BufferedReader, don't support -1
# Read an arbitrarily selected largeish chunk.
result = self.fp.read1(16*1024)
if not result and n:
self._close_conn()
return result
def peek(self, n=-1):
# Having this enables IOBase.readline() to read more than one
# byte at a time
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._peek_chunked(n)
return self.fp.peek(n)
def readline(self, limit=-1):
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
# Fallback to IOBase readline which uses peek() and read()
return super().readline(limit)
result = self.fp.readline(limit)
if not result and limit:
self._close_conn()
return result
def _read1_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
chunk_left = self._get_chunk_left()
if chunk_left is None or n == 0:
return b''
if not (0 <= n <= chunk_left):
n = chunk_left # if n is negative or larger than chunk_left
read = self.fp.read1(n)
self.chunk_left -= len(read)
if not read:
raise IncompleteRead(b"")
return read
def _peek_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
try:
chunk_left = self._get_chunk_left()
except IncompleteRead:
return b'' # peek doesn't worry about protocol
if chunk_left is None:
return b'' # eof
# peek is allowed to return more than requested. Just request the
# entire chunk, and truncate what we get.
return self.fp.peek(chunk_left)[:chunk_left]
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
return self.headers
def geturl(self):
return self.url
def getcode(self):
return self.status
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
# TCP Maximum Segment Size (MSS) is determined by the TCP stack on
# a per-connection basis. There is no simple and efficient
# platform independent mechanism for determining the MSS, so
# instead a reasonable estimate is chosen. The getsockopt()
# interface using the TCP_MAXSEG parameter may be a suitable
# approach on some operating systems. A value of 16KiB is chosen
# as a reasonable estimate of the maximum MSS.
mss = 16384
def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unit
# tests to replace it with a suitable mockup
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
"""Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
        the endpoint passed to `set_tunnel`. This is done by sending an HTTP
        CONNECT request to the proxy server when the connection is established.
        This method must be called before the HTTP connection has been
        established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
if self.sock:
raise RuntimeError("Can't set up tunnel for established connection")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
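    # Usage sketch (illustrative; the proxy and target hosts below are made up):
    #     conn = HTTPConnection('proxy.example.com', 8080)
    #     conn.set_tunnel('www.example.com', 80,
    #                     headers={'Proxy-Authorization': 'Basic ...'})
    # The CONNECT request itself is only sent later, from connect() via _tunnel().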
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
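    # Examples of the (host, port) splitting performed above (illustrative values):
    #     'www.example.com:8080'  -> ('www.example.com', 8080)
    #     'www.example.com'       -> ('www.example.com', default_port)
    #     'www.example.com:'      -> ('www.example.com', default_port)
    #     '[2001:db8::1]:8080'    -> ('2001:db8::1', 8080)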
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
self._tunnel_port)
connect_bytes = connect_str.encode("ascii")
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(b'\r\n')
response = self.response_class(self.sock, method=self._method)
(version, code, message) = response._read_status()
if code != 200:
self.close()
raise OSError("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending a trailer
break
if line in (b'\r\n', b'\n', b''):
break
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection((self.host,self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
self.__response = None
self.__state = _CS_IDLE
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
blocksize = 8192
if hasattr(data, "read") :
if self.debuglevel > 0:
print("sendIng a read()able")
encode = False
try:
mode = data.mode
except AttributeError:
# io.BytesIO and other file-like objects don't have a `mode`
# attribute.
pass
else:
if "b" not in mode:
encode = True
if self.debuglevel > 0:
print("encoding file using iso-8859-1")
while 1:
datablock = data.read(blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
            from collections.abc import Iterable
            if isinstance(data, Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((b"", b""))
msg = b"\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm. However,
# there is no performance gain if the message is larger
# than MSS (and there is a memory penalty for the message
# copy).
if isinstance(message_body, bytes) and len(message_body) < self.mss:
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest(self.__state)
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
                        # As per RFC 2732, IPv6 address should be wrapped with []
# when used as Host header
if host.find(':') >= 0:
host_enc = b'[' + host_enc + b']'
if port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
value = b'\r\n\t'.join(values)
header = header + b': ' + value
self._output(header)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional message_body
argument can be used to pass a message body associated with the
request. The message body will be sent in the same packet as the
        message headers if it is a bytes object, otherwise it is sent as a separate
packet.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except TypeError as te:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print("Cannot stat!!")
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body is not None and ('content-length' not in header_names):
self._set_content_length(body)
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
# RFC 2616 Section 3.7.1 says that text default has a
# default charset of iso-8859-1.
body = body.encode('iso-8859-1')
self.endheaders(body)
def getresponse(self):
"""Get the response from the server.
If the HTTPConnection is in the correct state, returns an
instance of HTTPResponse or of whatever object is returned by
        the response_class variable.
        If a request has not been sent or if a previous response has
        not been handled, ResponseNotReady is raised.  If the HTTP
response indicates that the connection should be closed, then
it will be closed before the response is returned. When the
connection is closed, the underlying socket is closed.
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady(self.__state)
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
method=self._method)
else:
response = self.response_class(self.sock, method=self._method)
try:
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
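# Minimal end-to-end sketch (illustrative; it performs real network I/O when called
# and assumes 'www.example.com' is reachable). It is defined here but never invoked.
def _example_request_response():
    conn = HTTPConnection('www.example.com', 80, timeout=10)
    conn.request('GET', '/', headers={'Accept': 'text/html'})
    resp = conn.getresponse()
    body = resp.read()  # consume the body so the connection can be reused
    conn.close()
    return resp.status, resp.reason, len(body)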
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
# XXX Should key_file and cert_file be deprecated in favour of context?
def __init__(self, host, port=None, key_file=None, cert_file=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, *, context=None,
check_hostname=None):
super(HTTPSConnection, self).__init__(host, port, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
will_verify = context.verify_mode != ssl.CERT_NONE
if check_hostname is None:
check_hostname = context.check_hostname
if check_hostname and not will_verify:
raise ValueError("check_hostname needs a SSL context with "
"either CERT_OPTIONAL or CERT_REQUIRED")
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
self._check_hostname = check_hostname
def connect(self):
"Connect to a host on a given (SSL) port."
super().connect()
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
if not self._context.check_hostname and self._check_hostname:
try:
ssl.match_hostname(self.sock.getpeercert(), server_hostname)
except Exception:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
__all__.append("HTTPSConnection")
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return '%s(%i bytes read%s)' % (self.__class__.__name__,
len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import ApplicationInsightsComponent
from ._models_py3 import ApplicationInsightsComponentListResult
from ._models_py3 import ComponentPurgeBody
from ._models_py3 import ComponentPurgeBodyFilters
from ._models_py3 import ComponentPurgeResponse
from ._models_py3 import ComponentPurgeStatusResponse
from ._models_py3 import ComponentsResource
from ._models_py3 import ErrorResponseComponents
from ._models_py3 import ErrorResponseComponentsError
from ._models_py3 import PrivateLinkScopedResource
from ._models_py3 import TagsResource
from ._application_insights_management_client_enums import (
ApplicationType,
FlowType,
IngestionMode,
PublicNetworkAccessType,
PurgeState,
RequestSource,
)
__all__ = [
'ApplicationInsightsComponent',
'ApplicationInsightsComponentListResult',
'ComponentPurgeBody',
'ComponentPurgeBodyFilters',
'ComponentPurgeResponse',
'ComponentPurgeStatusResponse',
'ComponentsResource',
'ErrorResponseComponents',
'ErrorResponseComponentsError',
'PrivateLinkScopedResource',
'TagsResource',
'ApplicationType',
'FlowType',
'IngestionMode',
'PublicNetworkAccessType',
'PurgeState',
'RequestSource',
]
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Soft Actor-Critic Agent.
Implements the Soft Actor-Critic (SAC) algorithm from
"Soft Actor-Critic Algorithms and Applications" by Haarnoja et al (2019).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.agents import tf_agent
from tf_agents.policies import actor_policy
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
SacLossInfo = collections.namedtuple(
'SacLossInfo', ('critic_loss', 'actor_loss', 'alpha_loss'))
@gin.configurable
def std_clip_transform(stddevs):
stddevs = tf.nest.map_structure(lambda t: tf.clip_by_value(t, -20, 2),
stddevs)
return tf.exp(stddevs)
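# Small sketch of the transform above (illustrative; requires TensorFlow). Raw
# "stddev" network outputs are treated as log-stddevs, clipped to [-20, 2] and
# exponentiated, so the resulting scale is bounded to roughly [2e-9, 7.4].
def _example_std_clip_transform():
  raw = tf.constant([-30.0, 0.0, 5.0])
  return std_clip_transform(raw)  # approx. [exp(-20), 1.0, exp(2)]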
@gin.configurable
class SacAgent(tf_agent.TFAgent):
"""A SAC Agent."""
def __init__(self,
time_step_spec,
action_spec,
critic_network,
actor_network,
actor_optimizer,
critic_optimizer,
alpha_optimizer,
actor_policy_ctor=actor_policy.ActorPolicy,
critic_network_2=None,
target_critic_network=None,
target_critic_network_2=None,
target_update_tau=1.0,
target_update_period=1,
td_errors_loss_fn=tf.math.squared_difference,
gamma=1.0,
reward_scale_factor=1.0,
initial_log_alpha=0.0,
target_entropy=None,
gradient_clipping=None,
debug_summaries=False,
summarize_grads_and_vars=False,
train_step_counter=None,
name=None):
"""Creates a SAC Agent.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
critic_network: A function critic_network((observations, actions)) that
returns the q_values for each observation and action.
actor_network: A function actor_network(observation, action_spec) that
returns action distribution.
actor_optimizer: The optimizer to use for the actor network.
critic_optimizer: The default optimizer to use for the critic network.
alpha_optimizer: The default optimizer to use for the alpha variable.
actor_policy_ctor: The policy class to use.
critic_network_2: (Optional.) A `tf_agents.network.Network` to be used as
the second critic network during Q learning. The weights from
`critic_network` are copied if this is not provided.
target_critic_network: (Optional.) A `tf_agents.network.Network` to be
used as the target critic network during Q learning. Every
`target_update_period` train steps, the weights from `critic_network`
        are copied (possibly with smoothing via `target_update_tau`) to
        `target_critic_network`.  If `target_critic_network` is not provided, it
is created by making a copy of `critic_network`, which initializes a new
network with the same structure and its own layers and weights.
Performing a `Network.copy` does not work when the network instance
already has trainable parameters (e.g., has already been built, or when
the network is sharing layers with another). In these cases, it is up
to you to build a copy having weights that are not shared with the
original `critic_network`, so that this can be used as a target network.
If you provide a `target_critic_network` that shares any weights with
`critic_network`, a warning will be logged but no exception is thrown.
target_critic_network_2: (Optional.) Similar network as
target_critic_network but for the critic_network_2. See documentation
for target_critic_network. Will only be used if 'critic_network_2' is
also specified.
target_update_tau: Factor for soft update of the target networks.
target_update_period: Period for soft update of the target networks.
td_errors_loss_fn: A function for computing the elementwise TD errors
loss.
gamma: A discount factor for future rewards.
reward_scale_factor: Multiplicative scale for the reward.
initial_log_alpha: Initial value for log_alpha.
target_entropy: The target average policy entropy, for updating alpha. The
        default value is the negative of the total number of action dimensions.
gradient_clipping: Norm length to clip gradients.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
name: The name of this agent. All variables in this module will fall under
that name. Defaults to the class name.
"""
tf.Module.__init__(self, name=name)
self._critic_network_1 = critic_network
self._target_critic_network_1 = (
common.maybe_copy_target_network_with_checks(self._critic_network_1,
target_critic_network,
'TargetCriticNetwork1'))
if critic_network_2 is not None:
self._critic_network_2 = critic_network_2
else:
self._critic_network_2 = critic_network.copy(name='CriticNetwork2')
# Do not use target_critic_network_2 if critic_network_2 is None.
target_critic_network_2 = None
self._target_critic_network_2 = (
common.maybe_copy_target_network_with_checks(self._critic_network_2,
target_critic_network_2,
'TargetCriticNetwork2'))
self._actor_network = actor_network
policy = actor_policy_ctor(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=self._actor_network)
self._log_alpha = common.create_variable(
'initial_log_alpha',
initial_value=initial_log_alpha,
dtype=tf.float32,
trainable=True)
# If target_entropy was not passed, set it to negative of the total number
# of action dimensions.
if target_entropy is None:
flat_action_spec = tf.nest.flatten(action_spec)
target_entropy = -np.sum([
np.product(single_spec.shape.as_list())
for single_spec in flat_action_spec
])
self._target_update_tau = target_update_tau
self._target_update_period = target_update_period
self._actor_optimizer = actor_optimizer
self._critic_optimizer = critic_optimizer
self._alpha_optimizer = alpha_optimizer
self._td_errors_loss_fn = td_errors_loss_fn
self._gamma = gamma
self._reward_scale_factor = reward_scale_factor
self._target_entropy = target_entropy
self._gradient_clipping = gradient_clipping
self._debug_summaries = debug_summaries
self._summarize_grads_and_vars = summarize_grads_and_vars
self._update_target = self._get_target_updater(
tau=self._target_update_tau, period=self._target_update_period)
train_sequence_length = 2 if not critic_network.state_spec else None
super(SacAgent, self).__init__(
time_step_spec,
action_spec,
policy=policy,
collect_policy=policy,
train_sequence_length=train_sequence_length,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter)
def _initialize(self):
"""Returns an op to initialize the agent.
Copies weights from the Q networks to the target Q network.
"""
common.soft_variables_update(
self._critic_network_1.variables,
self._target_critic_network_1.variables,
tau=1.0)
common.soft_variables_update(
self._critic_network_2.variables,
self._target_critic_network_2.variables,
tau=1.0)
def _experience_to_transitions(self, experience):
transitions = trajectory.to_transition(experience)
time_steps, policy_steps, next_time_steps = transitions
actions = policy_steps.action
if (self.train_sequence_length is not None and
self.train_sequence_length == 2):
      # Squeeze the singleton time dimension when the critic network is stateless.
time_steps, actions, next_time_steps = tf.nest.map_structure(
lambda t: tf.squeeze(t, axis=1),
(time_steps, actions, next_time_steps))
return time_steps, actions, next_time_steps
def _train(self, experience, weights):
"""Returns a train op to update the agent's networks.
This method trains with the provided batched experience.
Args:
experience: A time-stacked trajectory object.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights.
Returns:
A train_op.
Raises:
ValueError: If optimizers are None and no default value was provided to
the constructor.
"""
time_steps, actions, next_time_steps = self._experience_to_transitions(
experience)
trainable_critic_variables = (
self._critic_network_1.trainable_variables +
self._critic_network_2.trainable_variables)
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert trainable_critic_variables, ('No trainable critic variables to '
'optimize.')
tape.watch(trainable_critic_variables)
critic_loss = self.critic_loss(
time_steps,
actions,
next_time_steps,
td_errors_loss_fn=self._td_errors_loss_fn,
gamma=self._gamma,
reward_scale_factor=self._reward_scale_factor,
weights=weights)
tf.debugging.check_numerics(critic_loss, 'Critic loss is inf or nan.')
critic_grads = tape.gradient(critic_loss, trainable_critic_variables)
self._apply_gradients(critic_grads, trainable_critic_variables,
self._critic_optimizer)
trainable_actor_variables = self._actor_network.trainable_variables
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert trainable_actor_variables, ('No trainable actor variables to '
'optimize.')
tape.watch(trainable_actor_variables)
actor_loss = self.actor_loss(time_steps, weights=weights)
tf.debugging.check_numerics(actor_loss, 'Actor loss is inf or nan.')
actor_grads = tape.gradient(actor_loss, trainable_actor_variables)
self._apply_gradients(actor_grads, trainable_actor_variables,
self._actor_optimizer)
alpha_variable = [self._log_alpha]
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert alpha_variable, 'No alpha variable to optimize.'
tape.watch(alpha_variable)
alpha_loss = self.alpha_loss(time_steps, weights=weights)
tf.debugging.check_numerics(alpha_loss, 'Alpha loss is inf or nan.')
alpha_grads = tape.gradient(alpha_loss, alpha_variable)
self._apply_gradients(alpha_grads, alpha_variable, self._alpha_optimizer)
with tf.name_scope('Losses'):
tf.compat.v2.summary.scalar(
name='critic_loss', data=critic_loss, step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='actor_loss', data=actor_loss, step=self.train_step_counter)
tf.compat.v2.summary.scalar(
name='alpha_loss', data=alpha_loss, step=self.train_step_counter)
self.train_step_counter.assign_add(1)
self._update_target()
total_loss = critic_loss + actor_loss + alpha_loss
extra = SacLossInfo(critic_loss=critic_loss,
actor_loss=actor_loss,
alpha_loss=alpha_loss)
return tf_agent.LossInfo(loss=total_loss, extra=extra)
def _apply_gradients(self, gradients, variables, optimizer):
# list(...) is required for Python3.
grads_and_vars = list(zip(gradients, variables))
if self._gradient_clipping is not None:
grads_and_vars = eager_utils.clip_gradient_norms(grads_and_vars,
self._gradient_clipping)
if self._summarize_grads_and_vars:
eager_utils.add_variables_summaries(grads_and_vars,
self.train_step_counter)
eager_utils.add_gradients_summaries(grads_and_vars,
self.train_step_counter)
optimizer.apply_gradients(grads_and_vars)
def _get_target_updater(self, tau=1.0, period=1):
"""Performs a soft update of the target network parameters.
For each weight w_s in the original network, and its corresponding
weight w_t in the target network, a soft update is:
      w_t = (1 - tau) * w_t + tau * w_s
Args:
tau: A float scalar in [0, 1]. Default `tau=1.0` means hard update.
period: Step interval at which the target network is updated.
Returns:
A callable that performs a soft update of the target network parameters.
"""
with tf.name_scope('update_target'):
def update():
"""Update target network."""
critic_update_1 = common.soft_variables_update(
self._critic_network_1.variables,
self._target_critic_network_1.variables, tau)
critic_update_2 = common.soft_variables_update(
self._critic_network_2.variables,
self._target_critic_network_2.variables, tau)
return tf.group(critic_update_1, critic_update_2)
return common.Periodically(update, period, 'update_targets')
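  # Numeric illustration of the soft update above (made-up values): with tau=0.05,
  # a target weight w_t=1.0 and a source weight w_s=3.0 become
  #     w_t <- (1 - 0.05) * 1.0 + 0.05 * 3.0 = 1.1
  # tau=1.0 therefore copies the source weights outright (a hard update), and the
  # update is only applied every `period` calls via common.Periodically.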
def _actions_and_log_probs(self, time_steps):
"""Get actions and corresponding log probabilities from policy."""
# Get raw action distribution from policy, and initialize bijectors list.
batch_size = nest_utils.get_outer_shape(time_steps, self._time_step_spec)[0]
policy_state = self.policy.get_initial_state(batch_size)
action_distribution = self.policy.distribution(
time_steps, policy_state=policy_state).action
# Sample actions and log_pis from transformed distribution.
actions = tf.nest.map_structure(lambda d: d.sample(), action_distribution)
log_pi = common.log_probability(action_distribution, actions,
self.action_spec)
return actions, log_pi
def critic_loss(self,
time_steps,
actions,
next_time_steps,
td_errors_loss_fn,
gamma=1.0,
reward_scale_factor=1.0,
weights=None):
"""Computes the critic loss for SAC training.
Args:
time_steps: A batch of timesteps.
actions: A batch of actions.
next_time_steps: A batch of next timesteps.
td_errors_loss_fn: A function(td_targets, predictions) to compute
elementwise (per-batch-entry) loss.
gamma: Discount for future rewards.
reward_scale_factor: Multiplicative factor to scale rewards.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights.
Returns:
critic_loss: A scalar critic loss.
"""
with tf.name_scope('critic_loss'):
tf.nest.assert_same_structure(actions, self.action_spec)
tf.nest.assert_same_structure(time_steps, self.time_step_spec)
tf.nest.assert_same_structure(next_time_steps, self.time_step_spec)
next_actions, next_log_pis = self._actions_and_log_probs(next_time_steps)
target_input_1 = (next_time_steps.observation, next_actions)
target_q_values1, unused_network_state1 = self._target_critic_network_1(
target_input_1, next_time_steps.step_type)
target_input_2 = (next_time_steps.observation, next_actions)
target_q_values2, unused_network_state2 = self._target_critic_network_2(
target_input_2, next_time_steps.step_type)
target_q_values = (
tf.minimum(target_q_values1, target_q_values2) -
tf.exp(self._log_alpha) * next_log_pis)
td_targets = tf.stop_gradient(
reward_scale_factor * next_time_steps.reward +
gamma * next_time_steps.discount * target_q_values)
pred_input_1 = (time_steps.observation, actions)
pred_td_targets1, unused_network_state1 = self._critic_network_1(
pred_input_1, time_steps.step_type)
pred_input_2 = (time_steps.observation, actions)
pred_td_targets2, unused_network_state2 = self._critic_network_2(
pred_input_2, time_steps.step_type)
critic_loss1 = td_errors_loss_fn(td_targets, pred_td_targets1)
critic_loss2 = td_errors_loss_fn(td_targets, pred_td_targets2)
critic_loss = critic_loss1 + critic_loss2
if weights is not None:
critic_loss *= weights
# Take the mean across the batch.
critic_loss = tf.reduce_mean(input_tensor=critic_loss)
if self._debug_summaries:
td_errors1 = td_targets - pred_td_targets1
td_errors2 = td_targets - pred_td_targets2
td_errors = tf.concat([td_errors1, td_errors2], axis=0)
common.generate_tensor_summaries('td_errors', td_errors,
self.train_step_counter)
common.generate_tensor_summaries('td_targets', td_targets,
self.train_step_counter)
common.generate_tensor_summaries('pred_td_targets1', pred_td_targets1,
self.train_step_counter)
common.generate_tensor_summaries('pred_td_targets2', pred_td_targets2,
self.train_step_counter)
return critic_loss
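  # Worked numeric example of the TD target above (illustrative numbers): with
  # reward r=1.0, reward_scale_factor=1.0, gamma=0.99, discount=1.0,
  # min(Q1', Q2')=10.0, alpha=exp(log_alpha)=0.2 and next_log_pi=-3.0, the soft
  # target value is 10.0 - 0.2 * (-3.0) = 10.6 and the TD target is
  # 1.0 + 0.99 * 1.0 * 10.6 = 11.494, which both critics regress towards.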
def actor_loss(self, time_steps, weights=None):
"""Computes the actor_loss for SAC training.
Args:
time_steps: A batch of timesteps.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights.
Returns:
actor_loss: A scalar actor loss.
"""
with tf.name_scope('actor_loss'):
tf.nest.assert_same_structure(time_steps, self.time_step_spec)
actions, log_pi = self._actions_and_log_probs(time_steps)
target_input_1 = (time_steps.observation, actions)
target_q_values1, unused_network_state1 = self._critic_network_1(
target_input_1, time_steps.step_type)
target_input_2 = (time_steps.observation, actions)
target_q_values2, unused_network_state2 = self._critic_network_2(
target_input_2, time_steps.step_type)
target_q_values = tf.minimum(target_q_values1, target_q_values2)
actor_loss = tf.exp(self._log_alpha) * log_pi - target_q_values
if weights is not None:
actor_loss *= weights
actor_loss = tf.reduce_mean(input_tensor=actor_loss)
if self._debug_summaries:
common.generate_tensor_summaries('actor_loss', actor_loss,
self.train_step_counter)
common.generate_tensor_summaries('actions', actions,
self.train_step_counter)
common.generate_tensor_summaries('log_pi', log_pi,
self.train_step_counter)
tf.compat.v2.summary.scalar(
name='entropy_avg',
data=-tf.reduce_mean(input_tensor=log_pi),
step=self.train_step_counter)
common.generate_tensor_summaries('target_q_values', target_q_values,
self.train_step_counter)
batch_size = nest_utils.get_outer_shape(
time_steps, self._time_step_spec)[0]
policy_state = self.policy.get_initial_state(batch_size)
action_distribution = self.policy.distribution(
time_steps, policy_state).action
if isinstance(action_distribution, tfp.distributions.Normal):
common.generate_tensor_summaries('act_mean', action_distribution.loc,
self.train_step_counter)
common.generate_tensor_summaries(
'act_stddev', action_distribution.scale, self.train_step_counter)
elif isinstance(action_distribution, tfp.distributions.Categorical):
common.generate_tensor_summaries(
'act_mode', action_distribution.mode(), self.train_step_counter)
try:
common.generate_tensor_summaries('entropy_action',
action_distribution.entropy(),
self.train_step_counter)
except NotImplementedError:
pass # Some distributions do not have an analytic entropy.
return actor_loss
def alpha_loss(self, time_steps, weights=None):
"""Computes the alpha_loss for EC-SAC training.
Args:
time_steps: A batch of timesteps.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights.
Returns:
alpha_loss: A scalar alpha loss.
"""
with tf.name_scope('alpha_loss'):
tf.nest.assert_same_structure(time_steps, self.time_step_spec)
unused_actions, log_pi = self._actions_and_log_probs(time_steps)
alpha_loss = (
self._log_alpha * tf.stop_gradient(-log_pi - self._target_entropy))
if weights is not None:
alpha_loss *= weights
alpha_loss = tf.reduce_mean(input_tensor=alpha_loss)
if self._debug_summaries:
common.generate_tensor_summaries('alpha_loss', alpha_loss,
self.train_step_counter)
return alpha_loss
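# Standalone numeric sketch of the alpha loss above (illustrative; plain Python
# only). With target_entropy = -dim(A), the gradient pushes log_alpha up when the
# policy's entropy (-log_pi) falls below the target and down when it exceeds it.
def _example_alpha_loss(log_alpha=0.0, log_pi=-1.5, target_entropy=-3.0):
  # alpha_loss = log_alpha * stop_gradient(-log_pi - target_entropy)
  return log_alpha * (-log_pi - target_entropy)  # here: 0.0 * (1.5 + 3.0) = 0.0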
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import gettext
from tortuga.cli.tortugaCli import TortugaCli
from tortuga.helper.osHelper import getOsInfo
from tortuga.wsapi.kitWsApi import KitWsApi
from tortuga.wsapi.nodeWsApi import NodeWsApi
from tortuga.wsapi.softwareProfileWsApi import SoftwareProfileWsApi
_ = gettext.gettext
def displayComponent(c, kit):
# Depends on the __repr__ of Component and Kit objects
print('%s %s' % (kit, c))
class GetComponentList(TortugaCli):
def parseArgs(self, usage=None):
optGroup = 'Options'
group = self.addOptionGroup(optGroup, '')
excl_option_group = group.add_mutually_exclusive_group()
excl_option_group.add_argument(
'--software-profile',
dest='softwareprofile',
help=_('Display list of components enabled in software profile.')
)
excl_option_group.add_argument(
'-p',
dest='applyToInstaller',
action='store_true',
default=False,
help=_('Display components enabled on installer only')
)
excl_option_group.add_argument(
'--os',
dest='os',
metavar='NAME-VERSION-ARCH',
help=_('Display components suitable for specified OS only')
)
super().parseArgs(usage=usage)
def __get_software_profile(self):
# Determine software profile name based on command-line option(s)
if self.getArgs().applyToInstaller:
api = self.configureClient(NodeWsApi)
# Get software profile name from installer node
node = api.getInstallerNode(
optionDict={
'softwareprofile': True,
}
)
return node.getSoftwareProfile().getName()
return self.getArgs().softwareprofile
def runCommand(self):
self.parseArgs(_("""
Display list of components available for software profiles in the system.
"""))
softwareProfileName = self.__get_software_profile()
if softwareProfileName:
# Display all components enabled for software profile
swp_api = self.configureClient(SoftwareProfileWsApi)
for c in swp_api.getEnabledComponentList(softwareProfileName):
displayComponent(c, c.getKit())
return
if self.getArgs().os:
try:
name, version, arch = self.getArgs().os.split('-', 3)
except ValueError:
self.getParser().error(
'Malformed argument to --os. Must be in form of'
' NAME-VERSION-ARCH')
osinfo = getOsInfo(name, version, arch)
else:
osinfo = None
# Display all components
kit_api = self.configureClient(KitWsApi)
for kit in kit_api.getKitList():
for c in kit.getComponentList():
if osinfo and osinfo not in c.getOsInfoList() and \
osinfo.getOsFamilyInfo() not in c.getOsFamilyInfoList():
# Exclude those components that cannot be enabled on the
# specified operating system.
continue
displayComponent(c, kit)
def main():
GetComponentList().run()
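# Illustrative invocations (the installed command name is assumed to follow the
# usual Tortuga convention for this class; the flags come from parseArgs() above,
# and the profile name and OS tuple are made up):
#     get-component-list
#     get-component-list -p
#     get-component-list --software-profile Compute
#     get-component-list --os centos-7.4-x86_64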
|
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
import datetime as dt
from dateutil.relativedelta import relativedelta
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///./Resources/hawaii.sqlite")
# Reflect an existing database into a new model.
Base = automap_base()
# Reflect the tables.
Base.prepare(engine, reflect=True)
# Save reference to the tables.
Measurement = Base.classes.measurement
Station = Base.classes.station
# print(Base.classes.keys())
#################################################
# Flask Setup
#################################################
app = Flask(__name__,static_url_path='/Images/surfs-up.png')
#################################################
# Flask Routes
#################################################
# Set the home page and list all available routes. For ease of use, the routes are hyperlinked.
@app.route("/")
def welcome():
"""Available API routes."""
return (
f"<h1>SQL-Alchemy Challenge Step 2 Climate App</h1>"
f"<h1>This is a Flask API for Climate Analysis</h1>"
f"<br/><br/>"
f"<h2>Available API routes:</h2>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start/end<br/>"
f"<h2>Here are the hyperlinked routes you can click to see different pages:</h2>"
f"<ol><li><a href=http://127.0.0.1:5000/api/v1.0/precipitation>"
f"JSON list of precipitation amounts by date for the most recent year of data available</a></li><br/><br/>"
f"<li><a href=http://127.0.0.1:5000/api/v1.0/stations>"
f"JSON list of weather stations and their details</a></li><br/><br/>"
f"<li><a href=http://127.0.0.1:5000/api/v1.0/tobs>"
f"JSON list of the last 12 months of recorded temperatures</a></li><br/><br/>"
f"<li><a href=http://127.0.0.1:5000/api/v1.0/2017-08-23>"
f"When given the start date (YYYY-MM-DD), calculates the minimum, average, and maximum temperature for all dates greater than and equal to the start date</a></li><br/><br/>"
f"<li><a href=http://127.0.0.1:5000/api/v1.0/2016-08-23/2017-08-23>"
f"When given the start and the end date (YYYY-MM-DD), calculate the minimum, average, and maximum temperature for dates between the start and end date</a></li></ol><br/>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
"""Query to retrieve the last 12 months of precipitation data and return the results."""
# Create our session (link) from Python to the DB.
session = Session(engine)
# Calculate the date 1 year ago from the last data point in the database.
last_measurement_data_point_tuple = session.query(
Measurement.date).order_by(Measurement.date.desc()).first()
(latest_date, ) = last_measurement_data_point_tuple
latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d')
latest_date = latest_date.date()
date_year_ago = latest_date - relativedelta(years=1)
# Perform a query to retrieve the data and precipitation scores.
data_from_last_year = session.query(Measurement.date, Measurement.prcp).filter(
Measurement.date >= date_year_ago).all()
session.close()
# Convert the query results to a dictionary using date as the key and prcp as the value.
    all_precipitation = []
    for date, prcp in data_from_last_year:
        if prcp is not None:
            precip_dict = {date: prcp}
            all_precipitation.append(precip_dict)
    # Return the JSON representation of the dictionary list.
    return jsonify(all_precipitation)
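# Illustrative response shape for the route above (dates and values are made up):
#     [{"2016-08-24": 0.08}, {"2016-08-24": 2.15}, {"2016-08-25": 0.0}, ...]
# i.e. one {date: prcp} dictionary per measurement row from the last year of data.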
@app.route("/api/v1.0/tobs")
def tobs():
"""Query for the dates and temperature observations from a year from the last data point for the most active station."""
# Create our session (link) from Python to the DB.
session = Session(engine)
# Calculate the date 1 year ago from the last data point in the database.
last_measurement_data_point_tuple = session.query(
Measurement.date).order_by(Measurement.date.desc()).first()
(latest_date, ) = last_measurement_data_point_tuple
latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d')
latest_date = latest_date.date()
date_year_ago = latest_date - relativedelta(years=1)
# Find the most active station.
most_active_station = session.query(Measurement.station).\
group_by(Measurement.station).\
order_by(func.count().desc()).\
first()
# Get the station id of the most active station.
(most_active_station_id, ) = most_active_station
print(
f"The station id of the most active station is {most_active_station_id}.")
# Perform a query to retrieve the data and temperature scores for the most active station from the last year.
data_from_last_year = session.query(Measurement.date, Measurement.tobs).filter(
Measurement.station == most_active_station_id).filter(Measurement.date >= date_year_ago).all()
session.close()
# Convert the query results to a dictionary using date as the key and temperature as the value.
all_temperatures = []
for date, temp in data_from_last_year:
    if temp is not None:
        temp_dict = {}
        temp_dict[date] = temp
        all_temperatures.append(temp_dict)
# Return the JSON representation of the results.
return jsonify(all_temperatures)
@app.route("/api/v1.0/stations")
def stations():
"""Return a JSON list of stations from the dataset."""
# Create our session (link) from Python to the DB
session = Session(engine)
# Query for stations.
stations = session.query(Station.station, Station.name,
Station.latitude, Station.longitude, Station.elevation).all()
session.close()
# Convert the query results to a dictionary.
all_stations = []
for station, name, latitude, longitude, elevation in stations:
station_dict = {}
station_dict["station"] = station
station_dict["name"] = name
station_dict["latitude"] = latitude
station_dict["longitude"] = longitude
station_dict["elevation"] = elevation
all_stations.append(station_dict)
# Return the JSON representation of the list of stations.
return jsonify(all_stations)
@app.route('/api/v1.0/<start>', defaults={'end': None})
@app.route("/api/v1.0/<start>/<end>")
def determine_temps_for_date_range(start, end):
"""Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range."""
"""When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date."""
"""When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive."""
# Create our session (link) from Python to the DB.
session = Session(engine)
# If we have both a start date and an end date.
if end is not None:
temperature_data = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).filter(
Measurement.date <= end).all()
# If we only have a start date.
else:
temperature_data = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).all()
session.close()
# Convert the query results to a list.
temperature_list = []
no_temperature_data = False
for min_temp, avg_temp, max_temp in temperature_data:
if min_temp is None or avg_temp is None or max_temp is None:
no_temperature_data = True
temperature_list.append(min_temp)
temperature_list.append(avg_temp)
temperature_list.append(max_temp)
# Return the JSON representation of the results.
if no_temperature_data:
    return "No temperature data found for the given date range. Try another date range."
else:
return jsonify(temperature_list)
if __name__ == '__main__':
app.run(debug=True)
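# A minimal client-side usage sketch (an addition, not part of the original app): it assumes
# the server above is running locally on Flask's default http://127.0.0.1:5000 and reuses the
# example dates from the welcome page. In practice this would live in its own client script.
import requests

BASE_URL = "http://127.0.0.1:5000/api/v1.0"
for route in ("precipitation", "stations", "tobs", "2016-08-23/2017-08-23"):
    resp = requests.get(f"{BASE_URL}/{route}")
    print(route, resp.status_code, resp.text[:80])  # status code and a preview of the JSON body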
|
# Task 6. Variant 23
# 1-50. Develop a scoring system for task 6 in which the player earns more points for fewer attempts.
# Сароквашин Максим
# 13.05.2016
import random
print("The computer has picked the name of one of the seven days of the week, and you have to guess it.\n")
days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
day = random.randint(0, 6)
x = 0
i = 0
score = 0
while(x != 7):
    print(days[x])  # list the days the computer may have chosen
    x += 1
answer = input("\nEnter a day: ")
while(answer != days[day]):
    print("Wrong, try again.")
    answer = input("\nEnter a day: ")
    i += 1
if i == 0:
    score = 10
elif 0 < i < 6:
    score = 10 - i*2
else:
    score = 0
print("Correct, you win!")
print("Number of attempts: " + str(i))
print("You earned " + str(score) + " points")
input("\nPress Enter to exit.")
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HelpProvider
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
_detailed_help_text = ("""
<B>OVERVIEW</B>
We're open to incorporating gsutil code changes authored by users. Here
are some guidelines:
1. Before we can accept code submissions, we have to jump a couple of legal
hurdles. Please fill out either the individual or corporate Contributor
License Agreement:
- If you are an individual writing original source code and you're
sure you own the intellectual property,
then you'll need to sign an individual CLA
(http://code.google.com/legal/individual-cla-v1.0.html).
- If you work for a company that wants to allow you to contribute your
work to gsutil, then you'll need to sign a corporate CLA
(http://code.google.com/legal/corporate-cla-v1.0.html)
Follow either of the two links above to access the appropriate CLA and
instructions for how to sign and return it. Once we receive it, we'll
add you to the official list of contributors and be able to accept
your patches.
2. If you found a bug or have an idea for a feature enhancement, we suggest
you check http://code.google.com/p/gsutil/issues/list to see if it has
already been reported by another user. From there you can also add yourself
to the Cc list for an issue, so you will find out about any developments.
3. It's usually worthwhile to send email to gs-team@google.com about your
idea before sending actual code. Often we can discuss the idea and help
propose things that could save you later revision work.
4. We tend to avoid adding command line options that are of use to only
a very small fraction of users, especially if there's some other way
to accommodate such needs. Adding such options complicates the code and
also adds overhead to users having to read through an "alphabet soup"
list of option documentation.
5. While gsutil has a number of features specific to Google Cloud Storage,
it can also be used with other cloud storage providers. We're open to
including changes for making gsutil support features specific to other
providers, as long as those changes don't make gsutil work worse for Google
Cloud Storage. If you do make such changes we recommend including someone
with knowledge of the specific provider as a code reviewer (see below).
6. You can check out the gsutil code from svn - see
http://code.google.com/p/gsutil/source/checkout. Then change directories
into gsutil/src, and check out the boto code from github:
git clone git://github.com/boto/boto.git
7. Please make sure to run all tests against your modified code. To
do this, change directories into the gsutil top-level directory and run:
./gsutil test
The above tests take a long time to run because they send many requests to
the production service. The gsutil test command has a -u argument that will
only run unit tests. These run quickly, as they are executed with an
in-memory mock storage service implementation. To run only the unit tests,
run:
./gsutil test -u
If you made mods to boto please run the boto tests. For these tests you
need to use HMAC credentials (from gsutil config -a), because the current
boto test suite doesn't import the OAuth2 handler. You'll also need to
install some python modules: change directories into the top-level gsutil
directory and run:
pip install -qr boto/requirements.txt
(You probably need to run this command using sudo.)
Make sure each of the individual installations succeeded. If they don't
you may need to run individual ones again, e.g.,
pip install unittest2
Then ensure your .boto file has HMAC credentials defined (the boto tests
don't load the OAUTH2 plugin), and then change directories into boto/tests
and run:
python test.py unit
python test.py -t s3 -t gs -t ssl
8. Please consider contributing test code for your change, especially if the
change impacts any of the core gsutil code (like the gsutil cp command).
9. When it's time to send us code, please use the Rietveld code review tool
rather than simply sending us a code patch. Do this as follows:
- check out the gsutil code from
http://code.google.com/p/gsutil/source/checkout and apply your changes
in the checked out directory.
- download the "upload.py" script from
http://code.google.com/p/rietveld/wiki/UploadPyUsage
- run upload.py from the above gsutil svn directory.
- click the codereview.appspot.com link it generates, click "Edit Issue",
and add mfschwartz@google.com as a reviewer, and Cc gs-team@google.com.
- click Publish+Mail Comments.
""")
class CommandOptions(HelpProvider):
"""Additional help about Access Control Lists."""
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : 'dev',
# List of help name aliases.
HELP_NAME_ALIASES : ['development', 'developer', 'code', 'mods',
'software'],
# Type of help:
HELP_TYPE : HelpType.ADDITIONAL_HELP,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : 'Making modifications to gsutil',
# The full help text.
HELP_TEXT : _detailed_help_text,
}
|
import typing
import logging
import operator
import pyfastaq
from cluster_vcf_records import vcf_file_read, vcf_record_cluster
class VcfClusterer:
"""Class to cluster one (or more) VCF files. Records in the VCf files
can be in any order.
Required parameters:
vcf_files: list of VCF files
reference_fasta: FASTA file of reference genome. Must be the same one
that was used to make the VCF files
vcf_outfile: name of output VCF file
Optional parameters:
cluster_boundary_size: Any variant that is within this distance of a cluster's
start or end boundary will be put in that cluster.
The default of 0 means that only overlapping variants are clustered.
homozygous_only: Set this to True to only load homozygous variants from
the input VCF files, ie where the genotype is 1/1.
max_REF_len: When loading the VCF files, any records with REF longer
than this will be ignored. By default, there is no maximum.
This option should not normally be needed. It was intended to
limit the number of alleles, so use max_alleles_per_cluster instead.
max_alleles_per_cluster: maximum allowed alleles in one cluster. If a cluster
has more than this, then combinations of SNPs are not generated.
Instead, each indel is used on its own to make ALTs,
and all SNPs are applied to make another ALT with all the SNPs.
source: this is put into the source=foo part of the header of the
output VCF file.
Use it like this:
clusterer = VcfClusterer(vcf_files, reference_fasta, vcf_outfile[, options...])
clusterer.run()
"""
def __init__(
self,
vcf_files,
reference_fasta,
vcf_outfile,
cluster_boundary_size=0,
homozygous_only=False,
max_REF_len=None,
max_alleles_per_cluster=None,
source="cluster_vcf_records",
merge_method="gramtools",
max_gap_indel_rmdup=100,
):
self.vcf_files = vcf_files
self.reference_seqs = {}
pyfastaq.tasks.file_to_dict(reference_fasta, self.reference_seqs)
# If the FASTA header contains whitespace, also add the first word as a valid sequence ID (in addition to the whole header line produced by the pyfastaq module).
# This allows VCF files whose CHROM field is only the first word of the FASTA header to be parsed correctly.
stripped_keys = {
key.split()[0]: self.reference_seqs[key] for key in self.reference_seqs
}
self.reference_seqs.update(stripped_keys)
for seq in self.reference_seqs.values():
seq.seq = seq.seq.upper()
self.vcf_outfile = vcf_outfile
self.cluster_boundary_size = cluster_boundary_size
self.homozygous_only = homozygous_only
self.source = source
self.max_REF_len = max_REF_len
self.max_alleles_per_cluster = max_alleles_per_cluster
self.merge_method = merge_method
self.max_gap_indel_rmdup = max_gap_indel_rmdup
allowed_merge_methods = {"gramtools", "simple", "gt_aware"}
if self.merge_method not in {"gramtools", "simple", "gt_aware"}:
raise RuntimeError(
'Error! merge_method "'
+ self.merge_method
+ '" not allowed. Must be one of: '
+ ",".join(sorted(list(allowed_merge_methods)))
)
@classmethod
def _load_vcf_files(
cls,
filename_list,
reference_seqs,
homozygous_only=False,
max_REF_len=None,
min_SNP_qual=None,
min_dp4=None,
min_GT_conf=None,
):
"""Loads all the vcf files from filename_list. Returns tuple of:
1. Sample name. If more than one sample name found, uses the first one
and warns to stderr
2. Dictionary. filename => list of header lines for that file
3. Dictionary. ref name => list of VcfRecords sorted by position.
reference_seqs should be a dictionary of sequence name -> sequence.
This causes all records from the VCF to be sanity checked against the reference sequence,
and any record whose REF does not match the expected sequence is removed."""
headers = {}
vcf_records = None
sample_name = None
for filename in filename_list:
headers[filename], new_records = vcf_file_read.vcf_file_to_dict(
filename,
homozygous_only=homozygous_only,
remove_asterisk_alts=True,
max_REF_len=max_REF_len,
remove_useless_start_nucleotides=True,
min_SNP_qual=min_SNP_qual,
min_dp4=min_dp4,
min_GT_conf=min_GT_conf,
reference_seqs=reference_seqs,
error_on_bad_POS=False,
)
new_sample_name = vcf_file_read.get_sample_name_from_vcf_header_lines(
headers[filename]
)
if sample_name is None and new_sample_name is not None:
sample_name = new_sample_name
elif new_sample_name != sample_name:
logging.warning(
'Using first sample name found "'
+ str(sample_name)
+ '". Found a different (or no) sample name "'
+ str(new_sample_name)
+ '", which will not be used'
)
if vcf_records is None:
vcf_records = new_records
else:
for ref_name, record_list in new_records.items():
if ref_name not in vcf_records:
vcf_records[ref_name] = record_list
else:
vcf_records[ref_name].extend(record_list)
for record_list in vcf_records.values():
record_list.sort(key=operator.attrgetter("POS"))
if sample_name is None:
logging.warning('No sample name found in VCF files. Going to use "sample"')
sample_name = "sample"
return sample_name, headers, vcf_records
@classmethod
def _expand_alts_in_vcf_record_list(cls, vcf_records):
"""Input: list of vcf_records. Returns new list, where
any records with >ALT is replaced with one vcf record per ALT.
This doesn't change FORMAT or INFO columns, which means they
are now broken for those records"""
new_vcf_records = []
for record in vcf_records:
new_vcf_records.extend(record.to_record_per_alt())
return new_vcf_records
@classmethod
def _expand_alts_and_remove_duplicates_in_list(
cls, vcf_records, ref_seq, indel_gap=100
):
"""Input: list of VCF records, all from the same CHROM. ref_seq = sequence
of that CHROM. Expands any record in the list that has >ALT, into
one record per ALT. Removes duplicated records, where REF and ALT
are the same (at the same position!), or where there is the same
indel more than once, but written in a different way (e.g. an indel in a
homopolymer run can be represented in more than one way in a VCF). Indels
are treated as the same if they lie within indel_gap nucleotides of each other."""
expanded_vcf_records = VcfClusterer._expand_alts_in_vcf_record_list(vcf_records)
new_vcf_records = [x for x in expanded_vcf_records if not x.is_snp()]
# Because the routine below is worst-case quadratic, we refuse to do it if it implies > ~100 million calls
if len(new_vcf_records) > 10000:
return expanded_vcf_records
for i in range(len(new_vcf_records) - 1):
j = i + 1
while (
j < len(new_vcf_records)
and new_vcf_records[i].ref_end_pos() + indel_gap
> new_vcf_records[j].POS
):
if new_vcf_records[i].is_the_same_indel(new_vcf_records[j], ref_seq):
new_vcf_records.pop(j)
else:
j += 1
new_vcf_records.extend([x for x in expanded_vcf_records if x.is_snp()])
new_vcf_records.sort(key=operator.attrgetter("POS"))
return new_vcf_records
@classmethod
def _cluster_vcf_record_list(cls, vcf_records, cluster_boundary_size=0):
new_cluster_list = [
vcf_record_cluster.VcfRecordCluster(
cluster_boundary_size=cluster_boundary_size
)
]
# We try adding each vcf_record to the lastmost cluster; if this fails, we put it in a new cluster of its own.
for vcf_record in vcf_records:
last_cluster = new_cluster_list[-1]
successfully_added = last_cluster.add_vcf_record(vcf_record)
if not successfully_added: # Make a new cluster
new_cluster = vcf_record_cluster.VcfRecordCluster(
vcf_record=vcf_record, cluster_boundary_size=cluster_boundary_size,
)
new_cluster_list.append(new_cluster)
return new_cluster_list
def run(self):
sample_name, vcf_headers, vcf_records = VcfClusterer._load_vcf_files(
self.vcf_files,
self.reference_seqs,
homozygous_only=self.homozygous_only,
max_REF_len=self.max_REF_len,
)
f_out = pyfastaq.utils.open_file_write(self.vcf_outfile)
print("##fileformat=VCFv4.2", file=f_out)
print("##source=", self.source, sep="", file=f_out)
print(
"#CHROM",
"POS",
"ID",
"REF",
"ALT",
"QUAL",
"FILTER",
"INFO",
"FORMAT",
sample_name,
sep="\t",
file=f_out,
)
for ref_name in sorted(vcf_records):
ref_seq = self.reference_seqs[ref_name]
if self.merge_method == "gramtools":
rmdup_list = VcfClusterer._expand_alts_and_remove_duplicates_in_list(
vcf_records[ref_name], ref_seq, indel_gap=self.max_gap_indel_rmdup
)
cluster_list = VcfClusterer._cluster_vcf_record_list(
rmdup_list, cluster_boundary_size=self.cluster_boundary_size,
)
for cluster in cluster_list:
if len(cluster) > 0:
clustered_vcf = cluster.make_one_merged_vcf_record_for_gramtools(
ref_seq, max_alleles=self.max_alleles_per_cluster
)
if clustered_vcf is not None:
print(clustered_vcf, file=f_out)
else:
merged_record = cluster.make_separate_indels_and_one_alt_with_all_snps_no_combinations(
ref_seq
)
if merged_record is not None:
print(merged_record, file=f_out)
elif self.merge_method == "simple":
cluster_list = VcfClusterer._cluster_vcf_record_list(
vcf_records[ref_name],
cluster_boundary_size=self.cluster_boundary_size,
)
for cluster in cluster_list:
clustered_vcf = cluster.make_simple_merged_vcf_with_no_combinations(
ref_seq
)
for vcf in cluster.vcf_records:
print(vcf, file=f_out)
elif self.merge_method == "gt_aware":
cluster_list = VcfClusterer._cluster_vcf_record_list(
vcf_records[ref_name],
cluster_boundary_size=self.cluster_boundary_size,
)
for cluster in cluster_list:
clustered_vcf = cluster.make_simple_gt_aware_merged_vcf_with_no_combinations(
ref_seq
)
for vcf in cluster.vcf_records:
print(vcf, file=f_out)
else:
raise RuntimeError(
'merge_method "'
+ self.merge_method
+ '" not recognised. Cannot continue'
)
pyfastaq.utils.close(f_out)
def cluster(
input_vcf_file_paths: typing.List[str],
reference_file_path: str,
output_vcf_file_path: str,
**kw
):
_vcf_cluster = VcfClusterer(
input_vcf_file_paths, reference_file_path, output_vcf_file_path, **kw
)
return _vcf_cluster.run()
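# A minimal usage sketch of the wrapper above (an addition; the file names are placeholders
# and the keyword arguments are simply the optional VcfClusterer parameters documented above):
if __name__ == "__main__":
    cluster(
        ["sample1.calls.vcf", "sample2.calls.vcf"],   # input VCFs (placeholders)
        "reference.fa",                               # reference FASTA used to make the VCFs
        "clustered.vcf",                              # output VCF
        cluster_boundary_size=10,
        max_alleles_per_cluster=5000,
        merge_method="gramtools",
    )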
|
import matplotlib.pyplot as plt
import numpy as np
from time import time
import pandas as pd
from scipy.stats import norm
# Underlying asset information
S0 = 100.0
sigma = 0.2
# European option information
T = 1.0
K = 100.0
r = 0.05
# Simulation parameters
nbr_steps = 100
dt = T/nbr_steps
t = np.linspace(0, T, nbr_steps)
nbr_sim = 1000000
# parameters for greek calculation
seed = 2
dS = 1/S0
d_sigma = sigma/100
# European call price and greeks according to Black-Scholes
def d1():
return (np.log(S0/K)+(r+0.5*sigma**2)*T)/(sigma*np.sqrt(T))
def d2():
return d1() - sigma*np.sqrt(T)
def price_BS(S0):
return S0*norm.cdf(d1())-K*np.exp(-r*T)*norm.cdf(d2())
def delta_BS():
return norm.cdf(d1())
def gamma_BS():
return norm.pdf(d1())/(S0*sigma*np.sqrt(T))
def vega_BS():
return S0*np.sqrt(T)*norm.pdf(d1())
# Monte-Carlo pricing and greeks
def price_MC(S0, sigma):
# Setting the seed in order to get the same results
np.random.seed(seed)
price = 0.0
for _ in range(nbr_sim):
W = np.random.standard_normal(size = nbr_steps)
W = np.cumsum(W)*np.sqrt(dt)
X = (r-0.5*sigma**2)*t + sigma*W
S = S0*np.exp(X)
# Payoff computation of a European call
if(S[-1]>K):
price += S[-1]-K
return (price/nbr_sim)*np.exp(-r*T)
def delta_MC(dS):
p_S = price_MC(S0, sigma)
p_S_dS = price_MC(S0+dS, sigma)
return (p_S_dS - p_S)/dS
def gamma_MC(dS):
p_m_dS = price_MC(S0-dS, sigma)
p_S = price_MC(S0, sigma)
p_S_dS = price_MC(S0+dS, sigma)
return (p_m_dS - 2*p_S + p_S_dS)/dS**2
def vega_MC(d_sigma):
p_sigma = price_MC(S0, sigma)
p_d_sigma = price_MC(S0, sigma+d_sigma)
return (p_d_sigma - p_sigma)/d_sigma
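# A vectorized sketch of the same terminal-value estimate (an addition, not part of the
# original script; the function name is ours): since a European call payoff depends only
# on S_T, the per-path loop in price_MC can be replaced by a single vector of terminal
# prices. It reuses S0, K, r, T, seed and nbr_sim defined above and is not used by the
# tests below; shown only for comparison.
def price_MC_vectorized(S0, sigma, n_paths=nbr_sim):
    rng = np.random.default_rng(seed)
    Z = rng.standard_normal(n_paths)                            # one standard normal draw per path
    ST = S0*np.exp((r - 0.5*sigma**2)*T + sigma*np.sqrt(T)*Z)   # GBM terminal price
    payoff = np.maximum(ST - K, 0.0)                            # European call payoff
    return np.exp(-r*T)*payoff.mean()                           # discounted average payoff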
# Testing
delta_bs, delta_mc = delta_BS(), delta_MC(dS)
print('Delta : \nTheoretical value : {} ; Monte-Carlo value : {} ; Error : {} %'
      .format(delta_bs, delta_mc, 100*np.round(np.abs(delta_mc - delta_bs)/delta_bs, 5)))
gamma_bs, gamma_mc = gamma_BS(), gamma_MC(dS)
print('Gamma : \nTheoretical value : {} ; Monte-Carlo value : {} ; Error : {} %'
      .format(gamma_bs, gamma_mc, 100*np.round(np.abs(gamma_mc - gamma_bs)/gamma_bs, 5)))
vega_bs, vega_mc = vega_BS(), vega_MC(d_sigma)
print('Vega : \nTheoretical value : {} ; Monte-Carlo value : {} ; Error : {} %'
      .format(vega_bs, vega_mc, 100*np.round(np.abs(vega_mc - vega_bs)/vega_bs, 5)))
input('Press enter to continue...')
|
def resolve_tasks(taskset, manifest):
import tasks
taskset.add(tasks.EnableInsecureAccess)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.core.context as ctx
from azure.cli.core._util import CLIError
from azure.cli.core.cloud import CloudNotRegisteredException
def list_contexts():
return ctx.get_contexts()
def activate_context(context_name):
try:
ctx.set_active_context(context_name)
except ctx.ContextNotFoundException as e:
raise CLIError(e)
def delete_context(context_name):
try:
ctx.delete_context(context_name)
except ctx.ContextNotFoundException as e:
raise CLIError(e)
except ctx.CannotDeleteDefaultContextException as e:
raise CLIError(e)
except ctx.CannotDeleteActiveContextException as e:
raise CLIError(e)
def create_context(context_name, cloud_name, use_later=False):
try:
ctx.create_context(context_name, cloud_name)
except ctx.ContextExistsException as e:
raise CLIError(e)
except CloudNotRegisteredException as e:
raise CLIError(e)
if not use_later:
ctx.set_active_context(context_name)
def show_contexts(context_name=None):
if not context_name:
context_name = ctx.get_active_context_name()
try:
return ctx.get_context(context_name)
except ctx.ContextNotFoundException as e:
raise CLIError(e)
def modify_context(context_name=None, cloud_name=None, default_subscription=None):
if not context_name:
context_name = ctx.get_active_context_name()
try:
ctx.modify_context(context_name,
cloud_name=cloud_name,
default_subscription=default_subscription)
except ctx.ContextExistsException as e:
raise CLIError(e)
except CloudNotRegisteredException as e:
raise CLIError(e)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
r"""Generator class
Args:
model_dim(int): dimension of the model
vocab(int): vocab size
"""
def __init__(self, model_dim, vocab):
super(Generator, self).__init__()
self.dense = nn.Linear(model_dim, vocab)
def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
x = self.dense(x)
# Compute the log-probability distribution over the vocabulary
x = F.log_softmax(x, dim=1)
return x
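# A minimal usage sketch (an addition; model_dim=512 and vocab=1000 are illustrative assumptions):
if __name__ == "__main__":
    generator = Generator(model_dim=512, vocab=1000)
    decoder_states = torch.randn(8, 512)                 # a batch of 8 decoder output vectors
    log_probs = generator(decoder_states)                # shape (8, 1000): log-probabilities
    print(log_probs.shape, log_probs.exp().sum(dim=1))   # each row sums to ~1 in probability space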
|
from algo.mappo.elements.loss import create_loss
|
#
# Copyright 2013-2021 [The Foundry Visionmongers Ltd]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @namespace openassetio.hostAPI
# This module contains code relevant to anyone hosting the API in a tool or application,
# wanting to communicate with some asset management system.
#
# If you want to provide support for an asset management system, @see openassetio.managerAPI
#
from .HostInterface import HostInterface
from .Manager import Manager
from .ManagerFactoryInterface import ManagerFactoryInterface
from .Session import Session
|
from bs4 import BeautifulSoup
import time
from getpass import getpass
import colorama
from colorama import Fore
import subprocess
import os
import sys
from datetime import datetime
import re
import urllib
import webbrowser
import requests
from pync import Notifier
f = open('SourceCode/UserAgent.txt', 'r')
UserAgentID = f.read()
f.close()
f = open('SourceCode/StashID.txt', 'r')
StashID = f.read()
f.close()
#date and time imports
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print(Fore.WHITE + current_time)
print(Fore.WHITE + 'AmazonWebScraper 1.0.0\n' + Fore.YELLOW + 'by AccendWeb\n')
print(Fore.WHITE + 'Welcome',StashID + '!')
print(Fore.YELLOW + '1. run')
main_menu = input(':')
if main_menu == ('1'):
URLinput = input('Insert URL: ')
while True:
def StockChecker():
#Amazon
URL= URLinput
headers = {"User-Agent": UserAgentID}
page = requests.get(URL, headers=headers)
soup = BeautifulSoup(page.content,'html.parser')
title = soup.find(id="productTitle").get_text()
stock = soup.find(id="availability").get_text()
if 'Currently unavailable.' in stock:
print(Fore.GREEN + title.strip())
print(Fore.RED + "Currently unavailable, stock TBD")
if 'In stock on' in stock:
price = soup.find(id="priceblock_ourprice").get_text()
StockStrip = stock.strip()
StockStriped = StockStrip.replace('Order it now.', '').strip()
print(Fore.RED + StockStriped)
if 'In stock.' in stock:
price = soup.find(id="priceblock_ourprice").get_text()
print(Fore.GREEN + title.strip())
print(Fore.GREEN + price)
print(Fore.GREEN + stock.strip())
Notifier.notify('item in stock', title='Stash')
if 'Only' in stock:
print(Fore.GREEN + title.strip())
print(Fore.RED + stock.strip())
Notifier.notify(stock.strip(), title='Stash')
if '(more on the way)' in stock:
print(Fore.GREEN + title.strip())
print(Fore.RED + stock.strip())
Notifier.notify(stock.strip(), title='Stash')
if 'Available from these sellers.' in stock:
print(Fore.GREEN + title.strip())
print(Fore.RED + stock.strip())
StockChecker()
|
import os
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class Sample:
annotation_label_name: Optional[str]
prediction_label_name: Optional[str]
prediction_label_confidence: Optional[float]
sample_file: str
@dataclass
class Function:
name: Optional[str]
modality: str
samples: List[Sample]
label_names: List[str]
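# A minimal construction sketch (an addition; all field values are illustrative assumptions):
example_function = Function(
    name="vehicle_detector",
    modality="image",
    samples=[
        Sample(
            annotation_label_name="car",
            prediction_label_name="car",
            prediction_label_confidence=0.93,
            sample_file=os.path.join("images", "0001.jpg"),
        )
    ],
    label_names=["car", "truck"],
)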
|
import os
import pickle
import numpy as np
from tensorflow import keras
class MyPredictor(object):
"""An example Predictor for an AI Platform custom prediction routine."""
def __init__(self, model, embedder):
"""Stores artifacts for prediction. Only initialized via `from_path`.
"""
self._model_clf = model
self._model_emb = embedder
def predict(self, instances, **kwargs):
"""Performs custom prediction.
Preprocesses inputs, then performs prediction using the trained Keras
model.
Args:
instances: A list of prediction input instances.
**kwargs: A dictionary of keyword args provided as additional
fields on the predict request body.
Returns:
A list of outputs containing the prediction results.
"""
inputs = np.asarray(instances)
embedding = self._model_emb.predict(inputs)
outputs = self._model_clf.predict(embedding)
return outputs.tolist()
@classmethod
def from_path(cls, model_dir):
"""Creates an instance of MyPredictor using the given path.
This loads artifacts that have been copied from your model directory in
Cloud Storage. MyPredictor uses them during prediction.
Args:
model_dir: The local directory that contains the trained Keras
model and the pickled preprocessor instance. These are copied
from the Cloud Storage model directory you provide when you
deploy a version resource.
Returns:
An instance of `MyPredictor`.
"""
model_path = os.path.join(model_dir, 'race_model.h5')
model = keras.models.load_model(model_path)
embedder_path = os.path.join(model_dir, 'temp_embs.h5')
embedder = keras.models.load_model(embedder_path)
return cls(model, embedder)
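# A minimal local usage sketch (an addition; the directory name and the 2x128 dummy input
# shape are assumptions for illustration - real instances must match the embedder's input):
if __name__ == "__main__":
    predictor = MyPredictor.from_path("model_dir")    # directory containing race_model.h5 and temp_embs.h5
    dummy_instances = np.zeros((2, 128)).tolist()     # two fake prediction instances
    print(predictor.predict(dummy_instances))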
|